From 3b8d392548263d94e74fa6f8b4c3c9fa25ba6e90 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Fri, 23 Aug 2024 11:38:22 +0200 Subject: [PATCH 01/45] [FIX] pytest warnings Backport of d7a78f89d0877e1688f999e1d188fc519e5049d5 - `choice` is not a proper type - markers should be declared --- conftest.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/conftest.py b/conftest.py index 1357fdf3..539cf272 100644 --- a/conftest.py +++ b/conftest.py @@ -86,7 +86,7 @@ def pytest_addoption(parser): parser.addoption('--coverage', action='store_true') parser.addoption( - '--tunnel', action="store", type="choice", choices=['', 'ngrok', 'localtunnel'], default='', + '--tunnel', action="store", choices=['', 'ngrok', 'localtunnel'], default='', help="Which tunneling method to use to expose the local Odoo server " "to hook up github's webhook. ngrok is more reliable, but " "creating a free account is necessary to avoid rate-limiting " @@ -104,6 +104,11 @@ def pytest_configure(config): "markers", "expect_log_errors(reason): allow and require tracebacks in the log", ) + config.addinivalue_line( + "markers", + "defaultstatuses: use the statuses `default` rather than `ci/runbot,legal/cla`", + ) + def pytest_unconfigure(config): if not is_manager(config): From 1d106f552d46722129d68477b626a0a1aeb3494c Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Thu, 5 Sep 2024 13:02:10 +0200 Subject: [PATCH 02/45] [FIX] runbot_merge: missing feedback on fw r+ In some cases, feedback to the PR author that an r+ is redundant went missing. This turns out to be due to the convolution of the handling of approval on forward-port, and the fact that the target PR is treated exactly like its ancestors: if the PR is already approved the approval is not even attempted (and so no feedback if it's incorrect). Straighten up this bit and add a special case for the PR being commented on, it should have the usual feedback if in error or already commented on. 
Furthermore, update `PullRequests._pr_acl` to kinda work out of the box for forward-port: if the current PR is a forward port, `is_reviewer` should check delegation on all ancestors, there doesn't seem to be any reason to split "source_reviewer", "parent_reviewer", and "is_reviewer". Fixes #939 --- forwardport/tests/test_simple.py | 1 + forwardport/tests/test_weird.py | 58 +++++++++++++++++++ runbot_merge/models/commands.py | 10 +++- runbot_merge/models/pull_requests.py | 86 ++++++++++------------------ 4 files changed, 98 insertions(+), 57 deletions(-) diff --git a/forwardport/tests/test_simple.py b/forwardport/tests/test_simple.py index 634a25c0..187a1688 100644 --- a/forwardport/tests/test_simple.py +++ b/forwardport/tests/test_simple.py @@ -692,6 +692,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port '''.format_map(users)), (users['other'], 'hansen r+') ] + assert pr2_id.reviewed_by def test_redundant_approval(env, config, make_repo, users): diff --git a/forwardport/tests/test_weird.py b/forwardport/tests/test_weird.py index 19feda24..e2fd04a7 100644 --- a/forwardport/tests/test_weird.py +++ b/forwardport/tests/test_weird.py @@ -136,6 +136,64 @@ def test_failed_staging(env, config, make_repo): assert pr3_id.state == 'ready' assert pr3_id.staging_id +def test_fw_retry(env, config, make_repo, users): + prod, _ = make_basic(env, config, make_repo, statuses='default') + other_token = config['role_other']['token'] + fork = prod.fork(token=other_token) + with prod, fork: + fork.make_commits('a', Commit('c', tree={'a': '0'}), ref='heads/abranch') + pr1 = prod.make_pr( + title="whatever", + target='a', + head=f'{fork.owner}:abranch', + token=other_token, + ) + prod.post_status(pr1.head, 'success') + pr1.post_comment('hansen r+', config['role_reviewer']['token']) + env.run_crons() + + other_partner = env['res.partner'].search([('github_login', '=', users['other'])]) + assert len(other_partner) == 1 + other_partner.email = "foo@example.com" + + 
with prod: + prod.post_status('staging.a', 'success') + env.run_crons() + + _pr1_id, pr2_id = env['runbot_merge.pull_requests'].search([], order='number') + pr2 = prod.get_pr(pr2_id.number) + with prod: + prod.post_status(pr2_id.head, 'success') + pr2.post_comment('hansen r+', other_token) + env.run_crons() + assert not pr2_id.blocked + + with prod: + prod.post_status('staging.b', 'failure') + env.run_crons() + + assert pr2_id.error + with prod: + pr2.post_comment('hansen r+', other_token) + env.run_crons() + assert pr2_id.state == 'error' + with prod: + pr2.post_comment('hansen retry', other_token) + env.run_crons() + assert pr2_id.state == 'ready' + + assert pr2.comments == [ + seen(env, pr2, users), + (users['user'], "This PR targets b and is part of the forward-port chain. Further PRs will be created up to c.\n\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n"), + (users['other'], 'hansen r+'), + (users['user'], "@{other} @{reviewer} staging failed: default".format_map(users)), + + (users['other'], 'hansen r+'), + (users['user'], "This PR is already reviewed, it's in error, you might want to `retry` it instead (if you have already confirmed the error is not legitimate)."), + + (users['other'], 'hansen retry'), + ] + class TestNotAllBranches: """ Check that forward-ports don't behave completely insanely when not all branches are supported on all repositories. 
diff --git a/runbot_merge/models/commands.py b/runbot_merge/models/commands.py index fdee31c6..9e3c2963 100644 --- a/runbot_merge/models/commands.py +++ b/runbot_merge/models/commands.py @@ -72,6 +72,14 @@ class Approve: return f"r={ids}" return 'review+' + def __contains__(self, item): + if self.ids is None: + return True + return item in self.ids + + def fmt(self): + return ", ".join(f"#{n:d}" for n in (self.ids or ())) + @classmethod def help(cls, _: bool) -> Iterator[Tuple[str, str]]: yield "r(eview)+", "approves the PR, if it's a forwardport also approves all non-detached parents" @@ -184,7 +192,7 @@ class FW(enum.Enum): DEFAULT = enum.auto() NO = enum.auto() SKIPCI = enum.auto() - SKIPMERGE = enum.auto() + # SKIPMERGE = enum.auto() def __str__(self) -> str: return f'fw={self.name.lower()}' diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py index c2b243bb..cb7670e2 100644 --- a/runbot_merge/models/pull_requests.py +++ b/runbot_merge/models/pull_requests.py @@ -679,14 +679,14 @@ class PullRequests(models.Model): }) is_admin, is_reviewer, is_author = self._pr_acl(author) - _source_admin, source_reviewer, source_author = self.source_id._pr_acl(author) + source_author = self.source_id._pr_acl(author).is_author # nota: 15.0 `has_group` completely doesn't work if the recordset is empty super_admin = is_admin and author.user_ids and author.user_ids.has_group('runbot_merge.group_admin') help_list: list[type(commands.Command)] = list(filter(None, [ commands.Help, - (self.source_id and (source_author or source_reviewer) or is_reviewer) and not self.reviewed_by and commands.Approve, + (is_reviewer or (self.source_id and source_author)) and not self.reviewed_by and commands.Approve, (is_author or source_author) and self.reviewed_by and commands.Reject, (is_author or source_author) and self.error and commands.Retry, @@ -768,55 +768,21 @@ For your own safety I've ignored *everything in your entire comment*. 
match command: case commands.Approve() if self.draft: msg = "draft PRs can not be approved." - case commands.Approve() if self.source_id: - # rules are a touch different for forwardport PRs: - valid = lambda _: True if command.ids is None else lambda n: n in command.ids - _, source_reviewer, source_author = self.source_id._pr_acl(author) - - ancestors = list(self._iter_ancestors()) - # - reviewers on the original can approve any forward port - if source_reviewer: - approveable = ancestors - elif source_author: - # give full review rights on all forwardports (attached - # or not) to original author - approveable = ancestors + case commands.Approve() if self.source_id and (source_author or is_reviewer): + if selected := [p for p in self._iter_ancestors() if p.number in command]: + for pr in selected: + # ignore already reviewed PRs, unless it's the one + # being r+'d, this means ancestors in error will not + # be warned about + if pr == self or not pr.reviewed_by: + pr._approve(author, login) else: - # between the first merged ancestor and self - mergeors = list(itertools.dropwhile( - lambda p: p.state != 'merged', - reversed(ancestors), - )) - # between the first ancestor the current user can review and self - reviewors = list(itertools.dropwhile( - lambda p: not p._pr_acl(author).is_reviewer, - reversed(ancestors), - )) - - # source author can approve any descendant of a merged - # forward port (or source), people with review rights - # to a forward port have review rights to its - # descendants, if both apply use the most favorable - # (largest number of PRs) - if source_author and len(mergeors) > len(reviewors): - approveable = mergeors - else: - approveable = reviewors - - if approveable: - for pr in approveable: - if not (pr.state in RPLUS and valid(pr.number)): - continue - msg = pr._approve(author, login) - if msg: - break - else: - msg = f"you can't {command} you silly little bean." 
+ msg = f"tried to approve PRs {command.fmt()} but no such PR is an ancestors of {self.number}" case commands.Approve() if is_reviewer: - if command.ids is not None and command.ids != [self.number]: - msg = f"tried to approve PRs {command.ids} but the current PR is {self.number}" - else: + if command.ids is None or command.ids == [self.number]: msg = self._approve(author, login) + else: + msg = f"tried to approve PRs {command.fmt()} but the current PR is {self.number}" case commands.Reject() if is_author or source_author: if self.batch_id.skipchecks or self.reviewed_by: if self.error: @@ -916,10 +882,10 @@ For your own safety I've ignored *everything in your entire comment*. message = "Disabled forward-porting." case commands.FW.DEFAULT if is_author or source_author: message = "Waiting for CI to create followup forward-ports." - case commands.FW.SKIPCI if is_reviewer or source_reviewer: + case commands.FW.SKIPCI if is_reviewer: message = "Not waiting for CI to create followup forward-ports." - case commands.FW.SKIPMERGE if is_reviewer or source_reviewer: - message = "Not waiting for merge to create followup forward-ports." + # case commands.FW.SKIPMERGE if is_reviewer: + # message = "Not waiting for merge to create followup forward-ports." case _: msg = f"you don't have the right to {command}." @@ -1142,7 +1108,7 @@ For your own safety I've ignored *everything in your entire comment*. kw['bodies'] = {p.id: html_escape(message) for p in self} return super()._message_log_batch(**kw) - def _pr_acl(self, user): + def _pr_acl(self, user) -> ACL: if not self: return ACL(False, False, False) @@ -1151,10 +1117,18 @@ For your own safety I've ignored *everything in your entire comment*. ('repository_id', '=', self.repository.id), ('review', '=', True) if self.author != user else ('self_review', '=', True), ]) == 1 - is_reviewer = is_admin or self in user.delegate_reviewer - # TODO: should delegate reviewers be able to retry PRs? 
- is_author = is_reviewer or self.author == user - return ACL(is_admin, is_reviewer, is_author) + if is_admin: + return ACL(True, True, True) + + # delegate on source = delegate on PR + if self.source_id and self.source_id in user.delegate_reviewer: + return ACL(False, True, True) + # delegate on any ancestors ~ delegate on PR (maybe should be any descendant of source?) + if any(p in user.delegate_reviewer for p in self._iter_ancestors()): + return ACL(False, True, True) + + # user is probably always False on a forward port + return ACL(False, False, self.author == user) def _validate(self, statuses): # could have two PRs (e.g. one open and one closed) at least From 017b8d4bb04588d5760de21a149b1fa0e38b2126 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Fri, 6 Sep 2024 12:33:51 +0200 Subject: [PATCH 03/45] [FIX] forwardport: don't post reminder on PRs waiting to be staged The FW reminder is useful to remind people of the outstanding forward ports they need to process, as taking too long can be an issue. They are, however, not useful if the developer has already done everything, the PR is ready and unblocked, but the mergebot has fallen behind and has a hard time catching up. In that case there is nothing for the developer to do, so pinging them is not productive, it's only frustrating. 
Fixes #930 --- forwardport/models/project.py | 9 +++---- forwardport/tests/test_simple.py | 43 +++++++++++++++++++++----------- 2 files changed, 32 insertions(+), 20 deletions(-) diff --git a/forwardport/models/project.py b/forwardport/models/project.py index 72630453..c3d4806d 100644 --- a/forwardport/models/project.py +++ b/forwardport/models/project.py @@ -484,6 +484,8 @@ stderr: ('source_id', '!=', False), # active ('state', 'not in', ['merged', 'closed']), + # not ready + ('blocked', '!=', False), ('source_id.merge_date', '<', cutoff), ], order='source_id, id'), lambda p: p.source_id) @@ -498,11 +500,8 @@ stderr: continue source.reminder_backoff_factor += 1 - # only keep the PRs which don't have an attached descendant) - pr_ids = {p.id for p in prs} - for pr in prs: - pr_ids.discard(pr.parent_id.id) - for pr in (p for p in prs if p.id in pr_ids): + # only keep the PRs which don't have an attached descendant + for pr in set(prs).difference(p.parent_id for p in prs): self.env.ref('runbot_merge.forwardport.reminder')._send( repository=pr.repository, pull_request=pr.number, diff --git a/forwardport/tests/test_simple.py b/forwardport/tests/test_simple.py index 187a1688..0c4260c2 100644 --- a/forwardport/tests/test_simple.py +++ b/forwardport/tests/test_simple.py @@ -251,7 +251,7 @@ def test_empty(env, config, make_repo, users): """ Cherrypick of an already cherrypicked (or separately implemented) commit -> conflicting pr. 
""" - prod, other = make_basic(env, config, make_repo) + prod, other = make_basic(env, config, make_repo, statuses="default") # merge change to b with prod: [p_0] = prod.make_commits( @@ -259,13 +259,11 @@ def test_empty(env, config, make_repo, users): ref='heads/early' ) pr0 = prod.make_pr(target='b', head='early') - prod.post_status(p_0, 'success', 'legal/cla') - prod.post_status(p_0, 'success', 'ci/runbot') + prod.post_status(p_0, 'success') pr0.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() with prod: - prod.post_status('staging.b', 'success', 'legal/cla') - prod.post_status('staging.b', 'success', 'ci/runbot') + prod.post_status('staging.b', 'success') # merge same change to a afterwards with prod: @@ -274,15 +272,13 @@ def test_empty(env, config, make_repo, users): ref='heads/late' ) pr1 = prod.make_pr(target='a', head='late') - prod.post_status(p_1, 'success', 'legal/cla') - prod.post_status(p_1, 'success', 'ci/runbot') + prod.post_status(p_1, 'success') pr1.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() with prod: - prod.post_status('staging.a', 'success', 'legal/cla') - prod.post_status('staging.a', 'success', 'ci/runbot') - + prod.post_status('staging.a', 'success') env.run_crons() + assert prod.read_tree(prod.commit('a')) == { 'f': 'e', 'x': '0', @@ -315,7 +311,8 @@ def test_empty(env, config, make_repo, users): } with prod: - validate_all([prod], [fp_id.head, fail_id.head]) + prod.post_status(fp_id.head, 'success') + prod.post_status(fail_id.head, 'success') env.run_crons() # should not have created any new PR @@ -379,12 +376,28 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port (users['reviewer'], 'hansen r+'), seen(env, pr1, users), ] - assert fail_pr.comments == [ - seen(env, fail_pr, users), - conflict, + assert fail_pr.comments[2:] == [awaiting]*2,\ + "message should not be triggered on closed PR" + + with prod: + fail_pr.open() + with prod: + 
prod.post_status(fail_id.head, 'success') + assert fail_id.state == 'validated' + env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK}) + assert fail_pr.comments[2:] == [awaiting]*3, "check that message triggers again" + + with prod: + fail_pr.post_comment('hansen r+', config['role_reviewer']['token']) + assert fail_id.state == 'ready' + env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK}) + assert fail_pr.comments[2:] == [ awaiting, awaiting, - ], "each cron run should trigger a new message" + awaiting, + (users['reviewer'], "hansen r+"), + ],"if a PR is ready (unblocked), the reminder should not trigger as "\ + "we're likely waiting for the mergebot" def test_partially_empty(env, config, make_repo): """ Check what happens when only some commits of the PR are now empty From 64f9dcbc228e73f2d0acbf39c475db47672874ce Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Fri, 6 Sep 2024 13:04:13 +0200 Subject: [PATCH 04/45] [FIX] *: unnecessary warning on r- of forward port Reminding users that `r-` on a forward port only unreviews *that* forwardport is useful, as `r+;r-` is not a no-op (all preceding siblings are still reviewed). However it is useless if all siblings are not approved or already merged. So avoid sending the warning in that case. 
Fixes #934 --- forwardport/tests/test_simple.py | 8 ++++++++ runbot_merge/models/pull_requests.py | 8 +++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/forwardport/tests/test_simple.py b/forwardport/tests/test_simple.py index 0c4260c2..7ec2601c 100644 --- a/forwardport/tests/test_simple.py +++ b/forwardport/tests/test_simple.py @@ -629,6 +629,14 @@ def test_disapproval(env, config, make_repo, users): "sibling forward ports may have to be unapproved " "individually."), ] + with prod: + pr2.post_comment('hansen r-', token=config['role_other']['token']) + env.run_crons(None) + assert pr2_id.state == 'validated' + assert pr2.comments[2:] == [ + (users['other'], "hansen r+"), + (users['other'], "hansen r-"), + ], "shouldn't have a warning on r- because it's the only approved PR of the chain" def test_delegate_fw(env, config, make_repo, users): """If a user is delegated *on a forward port* they should be able to approve diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py index cb7670e2..4bb2edd3 100644 --- a/runbot_merge/models/pull_requests.py +++ b/runbot_merge/models/pull_requests.py @@ -796,10 +796,12 @@ For your own safety I've ignored *everything in your entire comment*. pull_request=self.number, format_args={'user': login, 'pr': self}, ) - if self.source_id: + if self.source_id.forwardport_ids.filtered( + lambda p: p.reviewed_by and p.state not in ('merged', 'closed') + ): feedback("Note that only this forward-port has been" - " unapproved, sibling forward ports may " - "have to be unapproved individually.") + " unapproved, sibling forward ports may" + " have to be unapproved individually.") self.unstage("unreviewed (r-) by %s", login) else: msg = "r- makes no sense in the current PR state." 
From 146564a90a7c4ee6bc111380886e2e1ec1dabe7b Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Fri, 6 Sep 2024 13:16:37 +0200 Subject: [PATCH 05/45] [FIX] runbot_merge: set staging_end on all terminations Rather than only setting `staging_end` on status change, set it when the staging gets deactivated. This way cancelling a staging (whether explicitely or via a PR update) will also end it, so will a staging timeout, etc..., rather than keep the counter running. Fixes #931 --- runbot_merge/models/pull_requests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py index 4bb2edd3..b4e7047f 100644 --- a/runbot_merge/models/pull_requests.py +++ b/runbot_merge/models/pull_requests.py @@ -2049,6 +2049,7 @@ class Stagings(models.Model): ._trigger(fields.Datetime.to_datetime(timeout)) if vals.get('active') is False: + vals['staging_end'] = fields.Datetime.now() self.env.ref("runbot_merge.staging_cron")._trigger() return super().write(vals) @@ -2141,7 +2142,6 @@ class Stagings(models.Model): s.state = st if s.state != 'pending': self.env.ref("runbot_merge.merge_cron")._trigger() - s.staging_end = fields.Datetime.now() if update_timeout_limit: s.timeout_limit = datetime.datetime.now() + datetime.timedelta(minutes=s.target.project_id.ci_timeout) self.env.ref("runbot_merge.merge_cron")._trigger(s.timeout_limit) From d0723499a2d71fe1ed55d55d60cf7362351837c6 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Fri, 6 Sep 2024 13:51:55 +0200 Subject: [PATCH 06/45] [IMP] runbot_merge: stage by first ready This is an approximation under the assumption that stored computes update the `write_date`, and that there's not much else that will be computed on a batch. Eventually it might be a good idea for this to be a proper field, computed alongside the unblocking of the batch. 
Fixes #932 --- runbot_merge/models/stagings_create.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/runbot_merge/models/stagings_create.py b/runbot_merge/models/stagings_create.py index 55955a0a..75464045 100644 --- a/runbot_merge/models/stagings_create.py +++ b/runbot_merge/models/stagings_create.py @@ -195,11 +195,15 @@ def ready_batches(for_branch: Branch) -> Tuple[bool, Batch]: # staged through priority *and* through split. split_ids = for_branch.split_ids.batch_ids.ids env.cr.execute(""" - SELECT max(priority) - FROM runbot_merge_batch - WHERE blocked IS NULL AND target = %s AND NOT id = any(%s) + SELECT exists ( + SELECT FROM runbot_merge_batch + WHERE priority = 'alone' + AND blocked IS NULL + AND target = %s + AND NOT id = any(%s) + ) """, [for_branch.id, split_ids]) - alone = env.cr.fetchone()[0] == 'alone' + [alone] = env.cr.fetchone() return ( alone, @@ -208,7 +212,7 @@ def ready_batches(for_branch: Branch) -> Tuple[bool, Batch]: ('blocked', '=', False), ('priority', '=', 'alone') if alone else (1, '=', 1), ('id', 'not in', split_ids), - ], order="priority DESC, id ASC"), + ], order="priority DESC, write_date ASC, id ASC"), ) From 60188063f8857991014463020bfc0cd884e06187 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Fri, 6 Sep 2024 15:09:08 +0200 Subject: [PATCH 07/45] [FIX] *: ensure I don't get bollocked up again by tags Today (or really a month ago) I learned: when giving git a symbolic ref (e.g. a ref name), if it's ambiguous then 1. If `$GIT_DIR/$name` exists, that is what you mean (this is usually useful only for `HEAD`, `FETCH_HEAD`, `ORIG_HEAD`, `MERGE_HEAD`, `REBASE_HEAD`, `REVERT_HEAD`, `CHERRY_PICK_HEAD`, `BISECT_HEAD` and `AUTO_MERGE`) 2. otherwise, `refs/$name` if it exists 3. otherwise, `refs/tags/$name` if it exists 4. otherwise, `refs/heads/$name` if it exists 5. otherwise, `refs/remotes/$name` if it exists 6. 
otherwise, `refs/remotes/$name/HEAD` if it exists This means if a tag and a branch have the same name and only the name is provided (not the full ref), git will select the tag, which gets very confusing for the mergebot as it now tries to rebase onto the tag (which because that's not fun otherwise was not even on the branch of the same name). Fix by providing full refs to `rev-parse` when trying to retrieve the head of the target branches. And as a defense in depth opportunity, also exclude tags when fetching refs by spec: apparently fetching a specific commit does not trigger the retrieval of tags, but any sort of spec will see the tags come along for the ride even if the tags are not in any way under the fetched ref e.g. `refs/heads/*` will very much retrieve the tags despite them being located at `refs/tags/*`. Fixes #922 --- forwardport/models/project.py | 5 +++-- runbot_merge/models/project_freeze/__init__.py | 2 +- runbot_merge/models/stagings_create.py | 3 ++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/forwardport/models/project.py b/forwardport/models/project.py index c3d4806d..0031b61b 100644 --- a/forwardport/models/project.py +++ b/forwardport/models/project.py @@ -362,7 +362,8 @@ stderr: {err} """ - target_head = source.stdout().rev_parse(target_branch.name).stdout.decode().strip() + target_head = source.stdout().rev_parse(f"refs/heads/{target_branch.name}")\ + .stdout.decode().strip() commit = conf.commit_tree( tree=tree.stdout.decode().splitlines(keepends=False)[0], parents=[target_head], @@ -386,7 +387,7 @@ stderr: logger = _logger.getChild(str(self.id)).getChild('cherrypick') # target's head - head = repo.stdout().rev_parse(branch).stdout.decode().strip() + head = repo.stdout().rev_parse(f"refs/heads/{branch}").stdout.decode().strip() commits = self.commits() logger.info( diff --git a/runbot_merge/models/project_freeze/__init__.py b/runbot_merge/models/project_freeze/__init__.py index 0a39ceae..7af53f27 100644 --- 
a/runbot_merge/models/project_freeze/__init__.py +++ b/runbot_merge/models/project_freeze/__init__.py @@ -217,7 +217,7 @@ class FreezeWizard(models.Model): for r in self.project_id.repo_ids } for repo, copy in repos.items(): - copy.fetch(git.source_url(repo), '+refs/heads/*:refs/heads/*') + copy.fetch(git.source_url(repo), '+refs/heads/*:refs/heads/*', no_tags=True) all_prs = self.release_pr_ids.pr_id | self.bump_pr_ids.pr_id for pr in all_prs: repos[pr.repository].fetch( diff --git a/runbot_merge/models/stagings_create.py b/runbot_merge/models/stagings_create.py index 75464045..08734958 100644 --- a/runbot_merge/models/stagings_create.py +++ b/runbot_merge/models/stagings_create.py @@ -244,7 +244,8 @@ def staging_setup( # be hooked only to "proper" remote-tracking branches # (in `refs/remotes`), it doesn't seem to work here f'+refs/heads/{target.name}:refs/heads/{target.name}', - *(pr.head for pr in by_repo.get(repo, [])) + *(pr.head for pr in by_repo.get(repo, [])), + no_tags=True, ) original_heads[repo] = head staging_state[repo] = StagingSlice(gh=gh, head=head, repo=source.stdout().with_config(text=True, check=False)) From 10ca096d867463a57b1de1e5943115c047fe1238 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Mon, 16 Sep 2024 12:46:44 +0200 Subject: [PATCH 08/45] [FIX] forwardport: cron create prototype Apparently when I added these to trigger the corresponding cron the lack of decorator caused them to not work at all, at least when invoked from the interface, consequently I notably wasn't able to force a forward port via creating one such task when trying to work around #953 --- forwardport/models/forwardport.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/forwardport/models/forwardport.py b/forwardport/models/forwardport.py index e805e59a..6f9320e1 100644 --- a/forwardport/models/forwardport.py +++ b/forwardport/models/forwardport.py @@ -1,9 +1,7 @@ # -*- coding: utf-8 -*- import builtins -import contextlib import logging import 
re -import uuid from contextlib import ExitStack from datetime import datetime, timedelta @@ -11,7 +9,7 @@ import requests import sentry_sdk from dateutil import relativedelta -from odoo import fields, models +from odoo import api, fields, models from odoo.addons.runbot_merge import git from odoo.addons.runbot_merge.github import GH @@ -77,6 +75,7 @@ class ForwardPortTasks(models.Model, Queue): retry_after = fields.Datetime(required=True, default='1900-01-01 01:01:01') pr_id = fields.Many2one('runbot_merge.pull_requests') + @api.model_create_multi def create(self, vals_list): self.env.ref('forwardport.port_forward')._trigger() return super().create(vals_list) @@ -270,6 +269,7 @@ class UpdateQueue(models.Model, Queue): original_root = fields.Many2one('runbot_merge.pull_requests') new_root = fields.Many2one('runbot_merge.pull_requests') + @api.model_create_multi def create(self, vals_list): self.env.ref('forwardport.updates')._trigger() return super().create(vals_list) @@ -359,6 +359,7 @@ class DeleteBranches(models.Model, Queue): pr_id = fields.Many2one('runbot_merge.pull_requests') + @api.model_create_multi def create(self, vals_list): self.env.ref('forwardport.remover')._trigger(datetime.now() - MERGE_AGE) return super().create(vals_list) From fe7cd8e1f08aa6e1e30bf0b09ef8c54c08e89ed4 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Mon, 16 Sep 2024 12:48:42 +0200 Subject: [PATCH 09/45] [IMP] runbot_merge: name PR correctly on staging success Logging PRs by id is unusual, unreadable, and inconvenient. 
--- runbot_merge/models/pull_requests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py index b4e7047f..4c175194 100644 --- a/runbot_merge/models/pull_requests.py +++ b/runbot_merge/models/pull_requests.py @@ -2298,7 +2298,7 @@ class Stagings(models.Model): prs = self.mapped('batch_ids.prs') logger.info( "%s FF successful, marking %s as merged", - self, prs + self, prs.mapped('display_name'), ) self.batch_ids.merge_date = fields.Datetime.now() From 851656bec0140056f3f426d6360158e9b0d7948c Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Mon, 16 Sep 2024 12:49:23 +0200 Subject: [PATCH 10/45] [IMP] runbot_merge: set status on skipchecks & use that - rather than enumerate states, forward-porting should just check if the statuses are successful on a PR - for the same consistency reasons explained in f97502e503cb52c0000ad9b153c4ddb7482c3a6d, `skipchecks` should force the status of a PR to `success`: it very odd that a PR would be ready without being validated... 
--- runbot_merge/models/batch.py | 2 +- runbot_merge/models/pull_requests.py | 5 ++++- runbot_merge/tests/test_oddities.py | 2 +- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/runbot_merge/models/batch.py b/runbot_merge/models/batch.py index d920802f..1a96075e 100644 --- a/runbot_merge/models/batch.py +++ b/runbot_merge/models/batch.py @@ -432,7 +432,7 @@ class Batch(models.Model): _logger.info('-> no parent %s (%s)', batch, prs) continue if not force_fw and batch.source.fw_policy != 'skipci' \ - and (invalid := batch.prs.filtered(lambda p: p.state not in ['validated', 'ready'])): + and (invalid := batch.prs.filtered(lambda p: p.status != 'success')): _logger.info( '-> wrong state %s (%s)', batch, diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py index 4c175194..dd27fe36 100644 --- a/runbot_merge/models/pull_requests.py +++ b/runbot_merge/models/pull_requests.py @@ -1163,7 +1163,7 @@ For your own safety I've ignored *everything in your entire comment*. super().modified(fnames, create, before) @api.depends( - 'statuses', 'overrides', 'target', 'parent_id', + 'statuses', 'overrides', 'target', 'parent_id', 'skipchecks', 'repository.status_ids.context', 'repository.status_ids.branch_filter', 'repository.status_ids.prs', @@ -1173,6 +1173,9 @@ For your own safety I've ignored *everything in your entire comment*. 
statuses = {**json.loads(pr.statuses), **pr._get_overrides()} pr.statuses_full = json.dumps(statuses, indent=4) + if pr.skipchecks: + pr.status = 'success' + continue st = 'success' for ci in pr.repository.status_ids._for_pr(pr): diff --git a/runbot_merge/tests/test_oddities.py b/runbot_merge/tests/test_oddities.py index 6bee3696..e0f530b5 100644 --- a/runbot_merge/tests/test_oddities.py +++ b/runbot_merge/tests/test_oddities.py @@ -260,7 +260,7 @@ def test_force_ready(env, repo, config): pr_id.skipchecks = True assert pr_id.state == 'ready' - assert pr_id.status == 'pending' + assert pr_id.status == 'success' reviewer = env['res.users'].browse([env._uid]).partner_id assert pr_id.reviewed_by == reviewer From a046cf2f7c1d5451fd0897a678f72520a5d9b169 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Tue, 17 Sep 2024 10:29:01 +0200 Subject: [PATCH 11/45] [FIX] *: UX around fw=no The UX around the split of limit and forward port policy (and especially moving "don't forward port" to the policy) was not really considered and caused confusion for both me and devs: after having disabled forward porting, updating the limit would not restore it, but there would be no indication of such an issue. This caused odoo/enterprise#68916 to not be forward ported at merge (despite looking for all the world like it should be), and while updating the limit post-merge did force a forward-port that inconsistency was just as jarring (also not helped by being unable to create an fw batch via the backend UI because reasons, see 10ca096d867463a57b1de1e5943115c047fe1238). Fix this along most axis: - Notify the user and reset the fw policy if the limit is updated while `fw=no`. - Trigger a forward port if the fw policy is updated (from `no`) on a merged PR, currently only sources. - Add check & warning to limit update process so it does *not* force a port (though maybe it should under the assumption that we're updating the limit anyway? We'll see). 
Fixes #953 --- forwardport/tests/test_limit.py | 78 ++++++++++++++++++++++++++++ runbot_merge/models/commands.py | 2 +- runbot_merge/models/pull_requests.py | 54 +++++++++++++++---- runbot_merge/tests/test_oddities.py | 1 + 4 files changed, 124 insertions(+), 11 deletions(-) diff --git a/forwardport/tests/test_limit.py b/forwardport/tests/test_limit.py index de563e3d..e5bfcd3e 100644 --- a/forwardport/tests/test_limit.py +++ b/forwardport/tests/test_limit.py @@ -76,6 +76,84 @@ def test_ignore(env, config, make_repo, users): (users['user'], "Disabled forward-porting."), ] + with prod: + pr.post_comment('hansen up to c', config['role_reviewer']['token']) + env.run_crons() + + assert env['runbot_merge.pull_requests'].search([]) == pr_id,\ + "should still not have created a forward port" + assert pr.comments[6:] == [ + (users['reviewer'], 'hansen up to c'), + (users['user'], "@%s can not forward-port, policy is 'no' on %s" % ( + users['reviewer'], + pr_id.display_name, + )) + ] + + assert pr_id.limit_id == env['runbot_merge.branch'].search([('name', '=', 'c')]) + pr_id.limit_id = False + + with prod: + pr.post_comment("hansen fw=default", config['role_reviewer']['token']) + env.run_crons() + + assert pr.comments[8:] == [ + (users['reviewer'], "hansen fw=default"), + (users['user'], "Starting forward-port. 
Waiting for CI to create followup forward-ports.") + ] + + assert env['runbot_merge.pull_requests'].search([('source_id', '=', pr_id.id)]),\ + "should finally have created a forward port" + + +def test_unignore(env, config, make_repo, users): + env['res.partner'].create({'name': users['other'], 'github_login': users['other'], 'email': 'other@example.org'}) + + prod, _ = make_basic(env, config, make_repo, statuses="default") + token = config['role_other']['token'] + fork = prod.fork(token=token) + with prod, fork: + [c] = fork.make_commits('a', Commit('c', tree={'0': '0'}), ref='heads/mybranch') + pr = prod.make_pr(target='a', head=f'{fork.owner}:mybranch', title="title", token=token) + prod.post_status(c, 'success') + env.run_crons() + + pr_id = to_pr(env, pr) + assert pr_id.batch_id.fw_policy == 'default' + + with prod: + pr.post_comment('hansen fw=no', token) + env.run_crons(None) + assert pr_id.batch_id.fw_policy == 'no' + + with prod: + pr.post_comment('hansen fw=default', token) + env.run_crons(None) + assert pr_id.batch_id.fw_policy == 'default' + + with prod: + pr.post_comment('hansen fw=no', token) + env.run_crons(None) + with prod: + pr.post_comment('hansen up to c', token) + env.run_crons(None) + assert pr_id.batch_id.fw_policy == 'default' + + assert pr.comments == [ + seen(env, pr, users), + (users['other'], "hansen fw=no"), + (users['user'], "Disabled forward-porting."), + (users['other'], "hansen fw=default"), + (users['user'], "Waiting for CI to create followup forward-ports."), + (users['other'], "hansen fw=no"), + (users['user'], "Disabled forward-porting."), + (users['other'], "hansen up to c"), + (users['user'], "Forward-porting to 'c'. Re-enabled forward-porting "\ + "(you should use `fw=default` to re-enable forward "\ + "porting after disabling)." 
+ ), + ] + def test_disable(env, config, make_repo, users): """ Checks behaviour if the limit target is disabled: diff --git a/runbot_merge/models/commands.py b/runbot_merge/models/commands.py index 9e3c2963..53c268d4 100644 --- a/runbot_merge/models/commands.py +++ b/runbot_merge/models/commands.py @@ -200,8 +200,8 @@ class FW(enum.Enum): @classmethod def help(cls, is_reviewer: bool) -> Iterator[Tuple[str, str]]: yield str(cls.NO), "does not forward-port this PR" + yield str(cls.DEFAULT), "forward-ports this PR normally" if is_reviewer: - yield str(cls.DEFAULT), "forward-ports this PR normally" yield str(cls.SKIPCI), "does not wait for a forward-port's statuses to succeed before creating the next one" diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py index dd27fe36..74d04261 100644 --- a/runbot_merge/models/pull_requests.py +++ b/runbot_merge/models/pull_requests.py @@ -9,6 +9,7 @@ import json import logging import re import time +from enum import IntEnum from functools import reduce from operator import itemgetter from typing import Optional, Union, List, Iterator, Tuple @@ -879,6 +880,7 @@ For your own safety I've ignored *everything in your entire comment*. case commands.Close() if source_author: feedback(close=True) case commands.FW(): + message = None match command: case commands.FW.NO if is_author or source_author: message = "Disabled forward-porting." @@ -890,10 +892,26 @@ For your own safety I've ignored *everything in your entire comment*. # message = "Not waiting for merge to create followup forward-ports." case _: msg = f"you don't have the right to {command}." + if message: + # TODO: feedback? 
+ if self.source_id: + "if the pr is not a source, ignore (maybe?)" + elif not self.merge_date: + "if the PR is not merged, it'll be fw'd normally" + elif self.batch_id.fw_policy != 'no' or command == commands.FW.NO: + "if the policy is not being flipped from no to something else, nothing to do" + elif branch_key(self.limit_id) <= branch_key(self.target): + "if the limit is lower than current (old style ignore) there's nothing to do" + else: + message = f"Starting forward-port. {message}" + self.env['forwardport.batches'].create({ + 'batch_id': self.batch_id.id, + 'source': 'merge', + }) - if not msg: (self.source_id or self).batch_id.fw_policy = command.name.lower() feedback(message=message) + case commands.Limit(branch) if is_author: if branch is None: feedback(message="'ignore' is deprecated, use 'fw=no' to disable forward porting.") @@ -901,7 +919,7 @@ For your own safety I've ignored *everything in your entire comment*. for p in self.batch_id.prs: ping, m = p._maybe_update_limit(limit) - if ping and p == self: + if ping is Ping.ERROR and p == self: msg = m else: if ping: @@ -934,31 +952,37 @@ For your own safety I've ignored *everything in your entire comment*. feedback(message=f"@{login}{rejections}{footer}") return 'rejected' - def _maybe_update_limit(self, limit: str) -> Tuple[bool, str]: + def _maybe_update_limit(self, limit: str) -> Tuple[Ping, str]: limit_id = self.env['runbot_merge.branch'].with_context(active_test=False).search([ ('project_id', '=', self.repository.project_id.id), ('name', '=', limit), ]) if not limit_id: - return True, f"there is no branch {limit!r}, it can't be used as a forward port target." + return Ping.ERROR, f"there is no branch {limit!r}, it can't be used as a forward port target." if limit_id != self.target and not limit_id.active: - return True, f"branch {limit_id.name!r} is disabled, it can't be used as a forward port target." 
+ return Ping.ERROR, f"branch {limit_id.name!r} is disabled, it can't be used as a forward port target." # not forward ported yet, just acknowledge the request if not self.source_id and self.state != 'merged': self.limit_id = limit_id if branch_key(limit_id) <= branch_key(self.target): - return False, "Forward-port disabled (via limit)." + return Ping.NO, "Forward-port disabled (via limit)." else: - return False, f"Forward-porting to {limit_id.name!r}." + suffix = '' + if self.batch_id.fw_policy == 'no': + self.batch_id.fw_policy = 'default' + suffix = " Re-enabled forward-porting (you should use "\ + "`fw=default` to re-enable forward porting "\ + "after disabling)." + return Ping.NO, f"Forward-porting to {limit_id.name!r}.{suffix}" # if the PR has been forwardported prs = (self | self.forwardport_ids | self.source_id | self.source_id.forwardport_ids) tip = max(prs, key=pr_key) # if the fp tip was closed it's fine if tip.state == 'closed': - return True, f"{tip.display_name} is closed, no forward porting is going on" + return Ping.ERROR, f"{tip.display_name} is closed, no forward porting is going on" prs.limit_id = limit_id @@ -977,7 +1001,7 @@ For your own safety I've ignored *everything in your entire comment*. except psycopg2.errors.LockNotAvailable: # row locked = port occurring and probably going to succeed, # so next(real_limit) likely a done deal already - return True, ( + return Ping.ERROR, ( f"Forward port of {tip.display_name} likely already " f"ongoing, unable to cancel, close next forward port " f"when it completes.") @@ -988,6 +1012,9 @@ For your own safety I've ignored *everything in your entire comment*. 
# forward porting was previously stopped at tip, and we want it to # resume if tip.state == 'merged': + if tip.batch_id.source.fw_policy == 'no': + # hack to ping the user but not rollback the transaction + return Ping.YES, f"can not forward-port, policy is 'no' on {(tip.source_id or tip).display_name}" self.env['forwardport.batches'].create({ 'batch_id': tip.batch_id.id, 'source': 'fp' if tip.parent_id else 'merge', @@ -1022,7 +1049,7 @@ For your own safety I've ignored *everything in your entire comment*. for p in (self.source_id | root) - self ]) - return False, msg + return Ping.NO, msg def _find_next_target(self) -> Optional[Branch]: @@ -1631,6 +1658,13 @@ For your own safety I've ignored *everything in your entire comment*. 'batch_id': batch.create({}).id, }) + +class Ping(IntEnum): + NO = 0 + YES = 1 + ERROR = 2 + + # ordering is a bit unintuitive because the lowest sequence (and name) # is the last link of the fp chain, reasoning is a bit more natural the # other way around (highest object is the last), especially with Python diff --git a/runbot_merge/tests/test_oddities.py b/runbot_merge/tests/test_oddities.py index e0f530b5..530a34d3 100644 --- a/runbot_merge/tests/test_oddities.py +++ b/runbot_merge/tests/test_oddities.py @@ -348,6 +348,7 @@ Currently available commands for @{user}: |-|-| |`help`|displays this help| |`fw=no`|does not forward-port this PR| +|`fw=default`|forward-ports this PR normally| |`up to `|only ports this PR forward to the specified branch (included)| |`check`|fetches or refreshes PR metadata, resets mergebot state| From c8a06601a7c223cdfc9da23f2c799b4c660f274b Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Wed, 18 Sep 2024 15:19:13 +0200 Subject: [PATCH 12/45] [FIX] *: unstage on status going from success to failure And unconditionally unstage when the HEAD of a PR is synchronised. While a rebuild on a PR which was previously staged can be a false positive (e.g. 
because it hit a non-derministic test the second time around) it can also be legitimate e.g. auto-rebase of an old PR to check it out. In that case the PR should be unstaged. Furthermore, as soon as the PR gets rebuilt it goes back into `approved` (because the status goes to pending and currently there's no great way to suppress that in the rebuild case without also fucking it up for the sync case). This would cause a sync on the PR to be missed (in that it would not unstage the PR), which is broken. Fix that by not checking the state of the PR beforehand, it seems to be an unnecessary remnant of older code, and not really an optimisation (or at least one likely not worth bothering with, especially as we then proceed to perform a full PR validation anyway). Fixes #950 --- forwardport/tests/test_weird.py | 5 +- mergebot_test_utils/utils.py | 14 +++++ runbot_merge/controllers/__init__.py | 3 - runbot_merge/models/pull_requests.py | 14 ++++- runbot_merge/tests/test_staging.py | 94 +++++++++++++++++++++++++++- 5 files changed, 121 insertions(+), 9 deletions(-) diff --git a/forwardport/tests/test_weird.py b/forwardport/tests/test_weird.py index e2fd04a7..b7feab33 100644 --- a/forwardport/tests/test_weird.py +++ b/forwardport/tests/test_weird.py @@ -3,7 +3,7 @@ from datetime import datetime, timedelta import pytest -from utils import seen, Commit, to_pr, make_basic +from utils import seen, Commit, to_pr, make_basic, prevent_unstaging def test_no_token(env, config, make_repo): @@ -935,7 +935,8 @@ def test_missing_magic_ref(env, config, make_repo): pr_id = to_pr(env, pr) assert pr_id.staging_id - pr_id.head = '0'*40 + with prevent_unstaging(pr_id.staging_id): + pr_id.head = '0'*40 with prod: prod.post_status('staging.a', 'success', 'legal/cla') prod.post_status('staging.a', 'success', 'ci/runbot') diff --git a/mergebot_test_utils/utils.py b/mergebot_test_utils/utils.py index c5473e93..e5c24fbd 100644 --- a/mergebot_test_utils/utils.py +++ b/mergebot_test_utils/utils.py @@ 
-1,4 +1,5 @@ # -*- coding: utf-8 -*- +import contextlib import itertools import re import time @@ -200,3 +201,16 @@ Signed-off-by: {pr_id.reviewed_by.formatted_email}""" def ensure_one(records): assert len(records) == 1 return records + + +@contextlib.contextmanager +def prevent_unstaging(st) -> None: + # hack: `Stagings.cancel` only cancels *active* stagings, + # so if we disable the staging, do a thing, then re-enable + # then things should work out + assert st and st.active, "preventing unstaging of not-a-staging is not useful" + st.active = False + try: + yield + finally: + st.active = True diff --git a/runbot_merge/controllers/__init__.py b/runbot_merge/controllers/__init__.py index b8005979..5dac8125 100644 --- a/runbot_merge/controllers/__init__.py +++ b/runbot_merge/controllers/__init__.py @@ -285,9 +285,6 @@ def handle_pr(env, event): _logger.error("PR %s sync %s -> %s => failure (closed)", pr_obj.display_name, pr_obj.head, pr['head']['sha']) return "It's my understanding that closed/merged PRs don't get sync'd" - if pr_obj.state == 'ready': - pr_obj.unstage("updated by %s", event['sender']['login']) - _logger.info( "PR %s sync %s -> %s by %s => reset to 'open' and squash=%s", pr_obj.display_name, diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py index 74d04261..577866cb 100644 --- a/runbot_merge/models/pull_requests.py +++ b/runbot_merge/models/pull_requests.py @@ -1164,7 +1164,7 @@ For your own safety I've ignored *everything in your entire comment*. # temporarily on the same head, or on the same head with different # targets updateable = self.filtered(lambda p: not p.merge_date) - updateable.statuses = statuses + updateable.statuses = statuses or '{}' for pr in updateable: if pr.status == "failure": statuses = json.loads(pr.statuses_full) @@ -1212,6 +1212,9 @@ For your own safety I've ignored *everything in your entire comment*. 
break if v == 'pending': st = 'pending' + if pr.status != 'failure' and st == 'failure': + pr.unstage("had CI failure after staging") + pr.status = st @api.depends( @@ -1346,7 +1349,7 @@ For your own safety I've ignored *everything in your entire comment*. pr = super().create(vals) c = self.env['runbot_merge.commit'].search([('sha', '=', pr.head)]) - pr._validate(c.statuses or '{}') + pr._validate(c.statuses) if pr.state not in ('closed', 'merged'): self.env.ref('runbot_merge.pr.created')._send( @@ -1423,8 +1426,13 @@ For your own safety I've ignored *everything in your entire comment*. newhead = vals.get('head') if newhead: + if pid := self.env.cr.precommit.data.get('change-author'): + writer = self.env['res.partner'].browse(pid) + else: + writer = self.env.user.partner_id + self.unstage("updated by %s", writer.github_login or writer.name) c = self.env['runbot_merge.commit'].search([('sha', '=', newhead)]) - self._validate(c.statuses or '{}') + self._validate(c.statuses) return w def _check_linked_prs_statuses(self, commit=False): diff --git a/runbot_merge/tests/test_staging.py b/runbot_merge/tests/test_staging.py index c1aeeeeb..7dc98713 100644 --- a/runbot_merge/tests/test_staging.py +++ b/runbot_merge/tests/test_staging.py @@ -1,4 +1,4 @@ -from utils import Commit, to_pr +from utils import Commit, to_pr, make_basic, prevent_unstaging def test_staging_disabled_branch(env, project, repo, config): @@ -26,3 +26,95 @@ def test_staging_disabled_branch(env, project, repo, config): "master is allowed to stage, should be staged" assert not to_pr(env, other_pr).staging_id, \ "other is *not* allowed to stage, should not be staged" + + +def test_staged_failure(env, config, repo, users): + """If a PR is staged and gets a new CI failure, it should be unstaged + + This was an issue with odoo/odoo#165931 which got rebuilt and that triggered + a failure, which made the PR !ready but kept the staging going. So the PR + ended up in an odd state of being both staged and not ready. 
+ + And while the CI failure it got was a false positive, it was in fact the + problematic PR breaking that staging. + + More relevant the runbot's "automatic rebase" mode sends CI to the original + commits so receiving legitimate failures after staging very much makes + sense e.g. an old PR is staged, the staging starts failing, somebody notices + the outdated PR and triggers autorebase, which fails (because something + incompatible was merged in the meantime), the PR *should* be unstaged. + """ + with repo: + repo.make_commits(None, Commit("master", tree={'a': '1'}), ref="heads/master") + + repo.make_commits('master', Commit('c', tree={'a': 'b'}), ref="heads/mybranch") + pr = repo.make_pr(target='master', head='mybranch') + repo.post_status('mybranch', 'success') + pr.post_comment('hansen r+', config['role_reviewer']['token']) + env.run_crons() + + pr_id = to_pr(env, pr) + staging = pr_id.staging_id + assert staging, "pr should be staged" + + with repo: + # started rebuild, nothing should happen + repo.post_status('mybranch', 'pending') + env.run_crons() + assert pr_id.staging_id + # can't find a clean way to keep this "ready" when transitioning from + # success to pending *without updating the head*, at least not without + # adding a lot of contextual information around `_compute_statuses` + # assert pr_id.state == 'ready' + + with repo: + # rebuild failed omg! + repo.post_status('mybranch', 'failure') + env.run_crons() + + assert pr_id.status == 'failure' + assert pr_id.state == 'approved' + + assert not pr_id.staging_id, "pr should be unstaged" + assert staging.state == "cancelled" + assert staging.reason == f"{pr_id.display_name} had CI failure after staging" + + +def test_update_unready(env, config, repo, users): + """Less likely with `test_staged_failure` fixing most of the issue, but + clearly the assumption that a staged PR will be `ready` is not strictly + enforced. 
+ + As such, updating the PR should try to `unstage` it no matter what state + it's in, this will lead to slightly higher loads on sync but loads on the + mergebot are generally non-existent outside of the git maintenance cron, + and there are doubtless other optimisations missing, or that (and other + items) can be done asynchronously. + """ + with repo: + repo.make_commits(None, Commit("master", tree={'a': '1'}), ref="heads/master") + + repo.make_commits('master', Commit('c', tree={'a': 'b'}), ref="heads/mybranch") + pr = repo.make_pr(target='master', head='mybranch') + repo.post_status('mybranch', 'success') + pr.post_comment('hansen r+', config['role_reviewer']['token']) + + [c2] = repo.make_commits('master', Commit('c', tree={'a': 'c'})) + env.run_crons() + + pr_id = to_pr(env, pr) + staging = pr_id.staging_id + assert staging, "pr should be staged" + + with prevent_unstaging(pr_id.staging_id): + pr_id.overrides = '{"default": {"state": "failure"}}' + assert pr_id.state == "approved" + assert pr_id.staging_id, "pr should not have been unstaged because we cheated" + + with repo: + repo.update_ref("heads/mybranch", c2, force=True) + env.run_crons() + + assert not pr_id.staging_id, "pr should be unstaged" + assert staging.state == "cancelled" + assert staging.reason == f"{pr_id.display_name} updated by {users['user']}" From 154e610bbc49853fd2ecc8a926c2976d54891fc8 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Thu, 19 Sep 2024 12:17:59 +0200 Subject: [PATCH 13/45] [IMP] *: modernize `TestPRUpdate` - Switch to just `default` ci. - Autouse fixture to init the master branch. - Switch to `make_commits`. - Merge `test_reopen_update` and `test_update_closed_revalidate` into `test_update_closed`: the former did basically nothing useful and the latter could easily be folded into `test_update_closed` by just validating the new commit. 
--- forwardport/tests/test_weird.py | 8 +- runbot_merge/tests/test_basic.py | 252 ++++++++++++------------------- 2 files changed, 96 insertions(+), 164 deletions(-) diff --git a/forwardport/tests/test_weird.py b/forwardport/tests/test_weird.py index b7feab33..9577f690 100644 --- a/forwardport/tests/test_weird.py +++ b/forwardport/tests/test_weird.py @@ -920,13 +920,12 @@ def test_missing_magic_ref(env, config, make_repo): Emulate this behaviour by updating the PR with a commit which lives in the repo but has no ref. """ - prod, _ = make_basic(env, config, make_repo) + prod, _ = make_basic(env, config, make_repo, statuses='default') a_head = prod.commit('refs/heads/a') with prod: [c] = prod.make_commits(a_head.id, Commit('x', tree={'x': '0'}), ref='heads/change') pr = prod.make_pr(target='a', head='change') - prod.post_status(c, 'success', 'legal/cla') - prod.post_status(c, 'success', 'ci/runbot') + prod.post_status(c, 'success') pr.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() @@ -938,8 +937,7 @@ def test_missing_magic_ref(env, config, make_repo): with prevent_unstaging(pr_id.staging_id): pr_id.head = '0'*40 with prod: - prod.post_status('staging.a', 'success', 'legal/cla') - prod.post_status('staging.a', 'success', 'ci/runbot') + prod.post_status('staging.a', 'success') env.run_crons() assert not pr_id.staging_id diff --git a/runbot_merge/tests/test_basic.py b/runbot_merge/tests/test_basic.py index 18d75d73..e59b8bf4 100644 --- a/runbot_merge/tests/test_basic.py +++ b/runbot_merge/tests/test_basic.py @@ -2191,83 +2191,53 @@ Co-authored-by: {other_user['name']} <{other_user['email']}>\ } -class TestPRUpdate(object): +class TestPRUpdate: """ Pushing on a PR should update the HEAD except for merged PRs, it can have additional effect (see individual tests) """ + @pytest.fixture(autouse=True) + def master(self, repo): + with repo: + [m] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), ref="heads/master") + return m + def 
test_update_opened(self, env, repo): with repo: - m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) - repo.make_ref('heads/master', m) - - c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'}) - prx = repo.make_pr(title='title', body='body', target='master', head=c) + [c] = repo.make_commits("master", Commit('fist', tree={'m': 'c1'})) + prx = repo.make_pr(target='master', head=c) pr = to_pr(env, prx) assert pr.head == c # alter & push force PR entirely with repo: - c2 = repo.make_commit(m, 'first', None, tree={'m': 'cc'}) - repo.update_ref(prx.ref, c2, force=True) - assert pr.head == c2 - - def test_reopen_update(self, env, repo, config): - with repo: - m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) - repo.make_ref('heads/master', m) - - c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'}) - prx = repo.make_pr(title='title', body='body', target='master', head=c) - prx.post_comment("hansen r+", config['role_reviewer']['token']) - - pr = to_pr(env, prx) - assert pr.state == 'approved' - assert pr.reviewed_by - with repo: - prx.close() - assert pr.state == 'closed' - assert pr.head == c - assert not pr.reviewed_by - - with repo: - prx.open() - assert pr.state == 'opened' - assert not pr.reviewed_by - - with repo: - c2 = repo.make_commit(c, 'first', None, tree={'m': 'cc'}) + [c2] = repo.make_commits("master", Commit('first', tree={'m': 'cc'})) repo.update_ref(prx.ref, c2, force=True) assert pr.head == c2 + @pytest.mark.defaultstatuses def test_update_validated(self, env, repo): """ Should reset to opened """ with repo: - m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) - repo.make_ref('heads/master', m) - - c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'}) - prx = repo.make_pr(title='title', body='body', target='master', head=c) - repo.post_status(prx.head, 'success', 'legal/cla') - repo.post_status(prx.head, 'success', 'ci/runbot') + [c] = repo.make_commits("master", Commit('first', tree={'m': 'c1'})) + pr = 
repo.make_pr(target='master', head=c) + repo.post_status(c, 'success') env.run_crons() - pr = to_pr(env, prx) - assert pr.head == c - assert pr.state == 'validated' + + pr_id = to_pr(env, pr) + assert pr_id.head == c + assert pr_id.state == 'validated' with repo: - c2 = repo.make_commit(m, 'first', None, tree={'m': 'cc'}) - repo.update_ref(prx.ref, c2, force=True) - assert pr.head == c2 - assert pr.state == 'opened' + [c2] = repo.make_commits("master", Commit('first', tree={'m': 'cc'})) + repo.update_ref(pr.ref, c2, force=True) + assert pr_id.head == c2 + assert pr_id.state == 'opened' def test_update_approved(self, env, repo, config): with repo: - m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) - repo.make_ref('heads/master', m) - - c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'}) - prx = repo.make_pr(title='title', body='body', target='master', head=c) + [c] = repo.make_commits("master", Commit('fist', tree={'m': 'c1'})) + prx = repo.make_pr(target='master', head=c) prx.post_comment('hansen r+', config['role_reviewer']['token']) pr = to_pr(env, prx) @@ -2275,22 +2245,19 @@ class TestPRUpdate(object): assert pr.state == 'approved' with repo: - c2 = repo.make_commit(c, 'first', None, tree={'m': 'cc'}) + [c2] = repo.make_commits("master", Commit('first', tree={'m': 'cc'})) repo.update_ref(prx.ref, c2, force=True) assert pr.head == c2 assert pr.state == 'opened' + @pytest.mark.defaultstatuses def test_update_ready(self, env, repo, config): """ Should reset to opened """ with repo: - m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) - repo.make_ref('heads/master', m) - - c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'}) - prx = repo.make_pr(title='title', body='body', target='master', head=c) - repo.post_status(prx.head, 'success', 'legal/cla') - repo.post_status(prx.head, 'success', 'ci/runbot') + [c] = repo.make_commits("master", Commit('fist', tree={'m': 'c1'})) + prx = repo.make_pr(target='master', head=c) + 
repo.post_status(prx.head, 'success') prx.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() pr = to_pr(env, prx) @@ -2298,22 +2265,19 @@ class TestPRUpdate(object): assert pr.state == 'ready' with repo: - c2 = repo.make_commit(c, 'first', None, tree={'m': 'cc'}) + [c2] = repo.make_commits(c, Commit('first', tree={'m': 'cc'})) repo.update_ref(prx.ref, c2, force=True) assert pr.head == c2 assert pr.state == 'opened' + @pytest.mark.defaultstatuses def test_update_staged(self, env, repo, config): """ Should cancel the staging & reset PR to opened """ with repo: - m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) - repo.make_ref('heads/master', m) - - c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'}) - prx = repo.make_pr(title='title', body='body', target='master', head=c) - repo.post_status(prx.head, 'success', 'legal/cla') - repo.post_status(prx.head, 'success', 'ci/runbot') + [c] = repo.make_commits("master", Commit('fist', tree={'m': 'c1'})) + prx = repo.make_pr(target='master', head=c) + repo.post_status(prx.head, 'success') prx.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() @@ -2322,33 +2286,27 @@ class TestPRUpdate(object): assert pr.staging_id with repo: - c2 = repo.make_commit(c, 'first', None, tree={'m': 'cc'}) + [c2] = repo.make_commits(c, Commit('first', tree={'m': 'cc'})) repo.update_ref(prx.ref, c2, force=True) assert pr.head == c2 assert pr.state == 'opened' assert not pr.staging_id assert not env['runbot_merge.stagings'].search([]) + @pytest.mark.defaultstatuses def test_split(self, env, repo, config): """ Should remove the PR from its split, and possibly delete the split entirely. 
""" with repo: - m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) - repo.make_ref('heads/master', m) - - c = repo.make_commit(m, 'first', None, tree={'m': 'm', '1': '1'}) - repo.make_ref('heads/p1', c) - prx1 = repo.make_pr(title='t1', body='b1', target='master', head='p1') - repo.post_status(prx1.head, 'success', 'legal/cla') - repo.post_status(prx1.head, 'success', 'ci/runbot') + repo.make_commits("master", Commit('first', tree={'1': '1'}), ref="heads/p1") + prx1 = repo.make_pr(target='master', head='p1') + repo.post_status(prx1.head, 'success') prx1.post_comment('hansen r+', config['role_reviewer']['token']) - c = repo.make_commit(m, 'first', None, tree={'m': 'm', '2': '2'}) - repo.make_ref('heads/p2', c) - prx2 = repo.make_pr(title='t2', body='b2', target='master', head='p2') - repo.post_status(prx2.head, 'success', 'legal/cla') - repo.post_status(prx2.head, 'success', 'ci/runbot') + [c] = repo.make_commits("master", Commit('first', tree={'2': '2'}), ref="heads/p2") + prx2 = repo.make_pr(target='master', head='p2') + repo.post_status(prx2.head, 'success') prx2.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() @@ -2359,7 +2317,7 @@ class TestPRUpdate(object): s0 = pr1.staging_id with repo: - repo.post_status('heads/staging.master', 'failure', 'ci/runbot') + repo.post_status('heads/staging.master', 'failure') env.run_crons() assert pr1.staging_id and pr1.staging_id != s0, "pr1 should have been re-staged" @@ -2369,22 +2327,20 @@ class TestPRUpdate(object): assert env['runbot_merge.split'].search([]) with repo: - repo.update_ref(prx2.ref, repo.make_commit(c, 'second', None, tree={'m': 'm', '2': '22'}), force=True) + [c2] = repo.make_commits(c, Commit('second', tree={'2': '22'})) + repo.update_ref(prx2.ref, c2, force=True) # probably not necessary ATM but... 
env.run_crons() assert pr2.state == 'opened', "state should have been reset" assert not env['runbot_merge.split'].search([]), "there should be no split left" + @pytest.mark.defaultstatuses def test_update_error(self, env, repo, config): with repo: - m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) - repo.make_ref('heads/master', m) - - c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'}) - prx = repo.make_pr(title='title', body='body', target='master', head=c) - repo.post_status(prx.head, 'success', 'legal/cla') - repo.post_status(prx.head, 'success', 'ci/runbot') + [c] = repo.make_commits("master", Commit('fist', tree={'m': 'c1'})) + prx = repo.make_pr(target='master', head=c) + repo.post_status(prx.head, 'success') prx.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() pr = to_pr(env, prx) @@ -2392,24 +2348,25 @@ class TestPRUpdate(object): assert pr.staging_id with repo: - repo.post_status('staging.master', 'success', 'legal/cla') - repo.post_status('staging.master', 'failure', 'ci/runbot') + repo.post_status('staging.master', 'failure') env.run_crons() assert not pr.staging_id assert pr.state == 'error' with repo: - c2 = repo.make_commit(c, 'first', None, tree={'m': 'cc'}) + [c2] = repo.make_commits(c, Commit('first', tree={'m': 'cc'})) repo.update_ref(prx.ref, c2, force=True) assert pr.head == c2 assert pr.state == 'opened' def test_unknown_pr(self, env, repo): with repo: - m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) + [m, c] = repo.make_commits( + None, + Commit('initial', tree={'m': 'm'}), + Commit('first', tree={'m': 'c1'}), + ) repo.make_ref('heads/1.0', m) - - c = repo.make_commit(m, 'first', None, tree={'m': 'c1'}) prx = repo.make_pr(title='title', body='body', target='1.0', head=c) assert not env['runbot_merge.pull_requests'].search([('number', '=', prx.number)]) @@ -2418,27 +2375,24 @@ class TestPRUpdate(object): }) with repo: - c2 = repo.make_commit(c, 'second', None, tree={'m': 'c2'}) + [c2] = 
repo.make_commits(c, Commit('second', tree={'m': 'c2'})) repo.update_ref(prx.ref, c2, force=True) assert not env['runbot_merge.pull_requests'].search([('number', '=', prx.number)]) + @pytest.mark.defaultstatuses def test_update_to_ci(self, env, repo): """ If a PR is updated to a known-valid commit, it should be validated """ with repo: - m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) - repo.make_ref('heads/master', m) - - c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'}) - c2 = repo.make_commit(m, 'first', None, tree={'m': 'cc'}) - repo.post_status(c2, 'success', 'legal/cla') - repo.post_status(c2, 'success', 'ci/runbot') + [c] = repo.make_commits("master", Commit('fist', tree={'m': 'c1'})) + [c2] = repo.make_commits("master", Commit('first', tree={'m': 'cc'})) + repo.post_status(c2, 'success') env.run_crons() with repo: - prx = repo.make_pr(title='title', body='body', target='master', head=c) + prx = repo.make_pr(target='master', head=c) pr = to_pr(env, prx) assert pr.head == c assert pr.state == 'opened' @@ -2448,6 +2402,7 @@ class TestPRUpdate(object): assert pr.head == c2 assert pr.state == 'validated' + @pytest.mark.defaultstatuses def test_update_missed(self, env, repo, config, users): """ Sometimes github's webhooks don't trigger properly, a branch's HEAD does not get updated and we might e.g. 
attempt to merge a PR despite it @@ -2467,10 +2422,9 @@ class TestPRUpdate(object): repo.make_ref('heads/somethingelse', c) [c] = repo.make_commits( - 'heads/master', repo.Commit('title \n\nbody', tree={'a': '1'}), ref='heads/abranch') + 'master', repo.Commit('title \n\nbody', tree={'a': '1'}), ref='heads/abranch') pr = repo.make_pr(target='master', head='abranch') - repo.post_status(pr.head, 'success', 'legal/cla') - repo.post_status(pr.head, 'success', 'ci/runbot') + repo.post_status(pr.head, 'success') pr.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() @@ -2486,8 +2440,7 @@ class TestPRUpdate(object): # to the PR *actually* having more than 1 commit and thus needing # a configuration [c2] = repo.make_commits('heads/master', repo.Commit('c2', tree={'a': '2'})) - repo.post_status(c2, 'success', 'legal/cla') - repo.post_status(c2, 'success', 'ci/runbot') + repo.post_status(c2, 'success') repo.update_ref(pr.ref, c2, force=True) other = env['runbot_merge.branch'].create({ @@ -2596,61 +2549,43 @@ Please check and re-approve. f"Updated target, squash, message. Updated {pr_id.display_name} to ready. Updated to {c2}." 
) - def test_update_closed(self, env, repo): + @pytest.mark.defaultstatuses + def test_update_closed(self, env, repo, config): with repo: - [m] = repo.make_commits(None, repo.Commit('initial', tree={'m': 'm'}), ref='heads/master') - - [c] = repo.make_commits(m, repo.Commit('first', tree={'m': 'm3'}), ref='heads/abranch') - prx = repo.make_pr(title='title', body='body', target='master', head=c) + [c] = repo.make_commits("master", repo.Commit('first', tree={'m': 'm3'}), ref='heads/abranch') + pr = repo.make_pr(target='master', head=c) + pr.post_comment("hansen r+", config['role_reviewer']['token']) env.run_crons() - pr = to_pr(env, prx) - assert pr.state == 'opened' - assert pr.head == c - assert pr.squash + + pr_id = to_pr(env, pr) + assert pr_id.state == 'approved' + assert pr_id.head == c + assert pr_id.squash + assert pr_id.reviewed_by with repo: - prx.close() - - with repo: - c2 = repo.make_commit(c, 'xxx', None, tree={'m': 'm4'}) - repo.update_ref(prx.ref, c2) - + pr.close() assert pr.state == 'closed' assert pr.head == c - assert pr.squash + assert not pr_id.reviewed_by + assert pr_id.squash with repo: - prx.open() - assert pr.state == 'opened' - assert pr.head == c2 - assert not pr.squash + [c2] = repo.make_commits(c, Commit('xxx', tree={'m': 'm4'})) + repo.update_ref(pr.ref, c2) + repo.post_status(c2, "success") + + assert pr_id.state == 'closed' + assert pr_id.head == c + assert not pr_id.reviewed_by + assert pr_id.squash - def test_update_closed_revalidate(self, env, repo): - """ The PR should be validated on opening and reopening in case there's - already a CI+ stored (as the CI might never trigger unless explicitly - re-requested) - """ with repo: - m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) - repo.make_ref('heads/master', m) - - c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'}) - repo.post_status(c, 'success', 'legal/cla') - repo.post_status(c, 'success', 'ci/runbot') - prx = repo.make_pr(title='title', body='body', 
target='master', head=c) - - env.run_crons() - pr = to_pr(env, prx) - assert pr.state == 'validated', \ - "if a PR is created on a CI'd commit, it should be validated immediately" - - with repo: prx.close() - assert pr.state == 'closed' - - with repo: prx.open() - assert pr.state == 'validated', \ - "if a PR is reopened and had a CI'd head, it should be validated immediately" - + pr.open() + assert pr_id.state == 'validated' + assert pr_id.head == c2 + assert not pr_id.reviewed_by + assert not pr_id.squash class TestBatching(object): def _pr(self, repo, prefix, trees, *, target='master', user, reviewer, @@ -2970,7 +2905,6 @@ class TestBatching(object): pr0.post_comment('hansen NOW!', config['role_reviewer']['token']) env.run_crons() - # TODO: maybe just deactivate stagings instead of deleting them when canceling? assert not p_1.staging_id assert to_pr(env, pr0).staging_id From e309e1a3a2043b0ef2e18aeaa25b3d20ed9cd269 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Fri, 20 Sep 2024 12:17:17 +0200 Subject: [PATCH 14/45] [FIX] runbot_merge: don't over-bump timeout By updating the staging timeout every time we run `_compute_state` and still have a `pending` status, we'd actually bump the timeout *on every success CI* except for the last one. Which was never the intention and can add an hour or two to the mergebot-side timeout. Instead, add an `updated_at` attribute to statuses (name taken from the webhook payload even though we don't use that), read it when we see `pending` required statuses, and update the staging timeout based on that if necessary. That way as long as we don't get *new* pending statuses, the timeout doesn't get updated. 
Fixes #952 --- runbot_merge/controllers/__init__.py | 4 ++- runbot_merge/models/pull_requests.py | 46 ++++++++++++++++------------ runbot_merge/tests/test_basic.py | 23 +++++++++----- 3 files changed, 46 insertions(+), 27 deletions(-) diff --git a/runbot_merge/controllers/__init__.py b/runbot_merge/controllers/__init__.py index 5dac8125..17b8b005 100644 --- a/runbot_merge/controllers/__init__.py +++ b/runbot_merge/controllers/__init__.py @@ -2,6 +2,7 @@ import hashlib import hmac import logging import json +from datetime import datetime import sentry_sdk import werkzeug.exceptions @@ -358,7 +359,8 @@ def handle_status(env, event): event['context']: { 'state': event['state'], 'target_url': event['target_url'], - 'description': event['description'] + 'description': event['description'], + 'updated_at': datetime.now().isoformat(timespec='seconds'), } }) # create status, or merge update into commit *unless* the update is already diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py index 577866cb..971ba99c 100644 --- a/runbot_merge/models/pull_requests.py +++ b/runbot_merge/models/pull_requests.py @@ -2133,6 +2133,7 @@ class Stagings(models.Model): if not self.env.user.has_group('runbot_merge.status'): raise AccessError("You are not allowed to post a status.") + now = datetime.datetime.now().isoformat(timespec='seconds') for s in self: if not s.target.project_id.staging_rpc: continue @@ -2145,6 +2146,7 @@ class Stagings(models.Model): 'state': status, 'target_url': target_url, 'description': description, + 'updated_at': now, } s.statuses_cache = json.dumps(st) @@ -2158,39 +2160,45 @@ class Stagings(models.Model): "heads.repository_id.status_ids.context", ) def _compute_state(self): - for s in self: - if s.state != 'pending': + for staging in self: + if staging.state != 'pending': continue # maps commits to the statuses they need required_statuses = [ - (h.commit_id.sha, h.repository_id.status_ids._for_staging(s).mapped('context')) - 
for h in s.heads + (h.commit_id.sha, h.repository_id.status_ids._for_staging(staging).mapped('context')) + for h in staging.heads ] - cmap = json.loads(s.statuses_cache) + cmap = json.loads(staging.statuses_cache) - update_timeout_limit = False - st = 'success' + last_pending = "" + state = 'success' for head, reqs in required_statuses: statuses = cmap.get(head) or {} - for v in map(lambda n: statuses.get(n, {}).get('state'), reqs): - if st == 'failure' or v in ('error', 'failure'): - st = 'failure' + for status in (statuses.get(n, {}) for n in reqs): + v = status.get('state') + if state == 'failure' or v in ('error', 'failure'): + state = 'failure' elif v is None: - st = 'pending' + state = 'pending' elif v == 'pending': - st = 'pending' - update_timeout_limit = True + state = 'pending' + last_pending = max(last_pending, status.get('updated_at', '')) else: assert v == 'success' - s.state = st - if s.state != 'pending': + staging.state = state + if staging.state != 'pending': self.env.ref("runbot_merge.merge_cron")._trigger() - if update_timeout_limit: - s.timeout_limit = datetime.datetime.now() + datetime.timedelta(minutes=s.target.project_id.ci_timeout) - self.env.ref("runbot_merge.merge_cron")._trigger(s.timeout_limit) - _logger.debug("%s got pending status, bumping timeout to %s (%s)", self, s.timeout_limit, cmap) + + if last_pending: + timeout = datetime.datetime.fromisoformat(last_pending) \ + + datetime.timedelta(minutes=staging.target.project_id.ci_timeout) + + if timeout > staging.timeout_limit: + staging.timeout_limit = timeout + self.env.ref("runbot_merge.merge_cron")._trigger(timeout) + _logger.debug("%s got pending status, bumping timeout to %s", staging, timeout) def action_cancel(self): w = self.env['runbot_merge.stagings.cancel'].create({ diff --git a/runbot_merge/tests/test_basic.py b/runbot_merge/tests/test_basic.py index e59b8bf4..440518e6 100644 --- a/runbot_merge/tests/test_basic.py +++ b/runbot_merge/tests/test_basic.py @@ -605,7 +605,6 @@ def 
test_staging_ci_timeout(env, repo, config, page, update_op: Callable[[int], assert dangerbox assert dangerbox[0].text == 'timed out (>60 minutes)' -@pytest.mark.defaultstatuses def test_timeout_bump_on_pending(env, repo, config): with repo: [m, c] = repo.make_commits( @@ -615,8 +614,9 @@ def test_timeout_bump_on_pending(env, repo, config): ) repo.make_ref('heads/master', m) - prx = repo.make_pr(title='title', body='body', target='master', head=c) - repo.post_status(prx.head, 'success') + prx = repo.make_pr(target='master', head=c) + repo.post_status(prx.head, 'success', 'ci/runbot') + repo.post_status(prx.head, 'success', 'legal/cla') prx.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() @@ -624,9 +624,18 @@ def test_timeout_bump_on_pending(env, repo, config): old_timeout = odoo.fields.Datetime.to_string(datetime.datetime.now() - datetime.timedelta(days=15)) st.timeout_limit = old_timeout with repo: - repo.post_status('staging.master', 'pending') + repo.post_status('staging.master', 'pending', 'ci/runbot') env.run_crons(None) - assert st.timeout_limit > old_timeout + assert st.timeout_limit > old_timeout, "receiving a pending status should bump the timeout" + + st.timeout_limit = old_timeout + # clear the statuses cache to remove the memoized status times + st.statuses_cache = "{}" + st.commit_ids.statuses = "{}" + with repo: + repo.post_status('staging.master', 'success', 'legal/cla') + env.run_crons(None) + assert st.timeout_limit == old_timeout, "receiving a success status should *not* bump the timeout" def test_staging_ci_failure_single(env, repo, users, config, page): """ on failure of single-PR staging, mark & notify failure @@ -3327,8 +3336,8 @@ class TestUnknownPR: c = env['runbot_merge.commit'].search([('sha', '=', prx.head)]) assert json.loads(c.statuses) == { - 'legal/cla': {'state': 'success', 'target_url': None, 'description': None}, - 'ci/runbot': {'state': 'success', 'target_url': 'http://example.org/wheee', 'description': 
None} + 'legal/cla': {'state': 'success', 'target_url': None, 'description': None, 'updated_at': matches("$$")}, + 'ci/runbot': {'state': 'success', 'target_url': 'http://example.org/wheee', 'description': None, 'updated_at': matches("$$")} } assert prx.comments == [ seen(env, prx, users), From 98868b520046f4dae28507b64ea96298cd6fdd04 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Tue, 24 Sep 2024 09:53:32 +0200 Subject: [PATCH 15/45] [IMP] runbot_merge: add notifications on inactive branch interactions Add warnings when trying to send comments / commands to PRs targeting inactive branches. This was missing leading to confusion, as one warning is clearly not enough. Fixes #941 --- runbot_merge/models/pull_requests.py | 44 +++++--- runbot_merge/tests/test_disabled_branch.py | 114 +++++++++++---------- 2 files changed, 91 insertions(+), 67 deletions(-) diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py index 971ba99c..d04e2a72 100644 --- a/runbot_merge/models/pull_requests.py +++ b/runbot_merge/models/pull_requests.py @@ -620,26 +620,46 @@ class PullRequests(models.Model): return json.loads(self.overrides) return {} - def _get_or_schedule(self, repo_name, number, *, target=None, closing=False): + def _get_or_schedule(self, repo_name, number, *, target=None, closing=False) -> PullRequests | None: repo = self.env['runbot_merge.repository'].search([('name', '=', repo_name)]) if not repo: - return - - if target and not repo.project_id._has_branch(target): - self.env.ref('runbot_merge.pr.fetch.unmanaged')._send( - repository=repo, - pull_request=number, - format_args={'repository': repo, 'branch': target, 'number': number} + source = self.env['runbot_merge.events_sources'].search([('repository', '=', repo_name)]) + _logger.warning( + "Got a PR notification for unknown repository %s (source %s)", + repo_name, source, ) return - pr = self.search([ - ('repository', '=', repo.id), - ('number', '=', number,) - ]) + if target: + b = 
self.env['runbot_merge.branch'].with_context(active_test=False).search([ + ('project_id', '=', repo.project_id.id), + ('name', '=', target), + ]) + tmpl = None if b.active \ + else 'runbot_merge.handle.branch.inactive' if b\ + else 'runbot_merge.pr.fetch.unmanaged' + else: + tmpl = None + + pr = self.search([('repository', '=', repo.id), ('number', '=', number)]) + if pr and not pr.target.active: + tmpl = 'runbot_merge.handle.branch.inactive' + target = pr.target.name + + if tmpl and not closing: + self.env.ref(tmpl)._send( + repository=repo, + pull_request=number, + format_args={'repository': repo_name, 'branch': target, 'number': number}, + ) + if pr: return pr + # if the branch is unknown or inactive, no need to fetch the PR + if tmpl: + return + Fetch = self.env['runbot_merge.fetch_job'] if Fetch.search([('repository', '=', repo.id), ('number', '=', number)]): return diff --git a/runbot_merge/tests/test_disabled_branch.py b/runbot_merge/tests/test_disabled_branch.py index 88e5e490..eaf2e9f6 100644 --- a/runbot_merge/tests/test_disabled_branch.py +++ b/runbot_merge/tests/test_disabled_branch.py @@ -1,8 +1,11 @@ import pytest -from utils import seen, Commit, pr_page +from utils import seen, Commit, pr_page, to_pr -def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, config, users, page): + +pytestmark = pytest.mark.defaultstatuses + +def test_existing_pr_disabled_branch(env, project, repo, config, users, page): """ PRs to disabled branches are ignored, but what if the PR exists *before* the branch is disabled? 
""" @@ -10,20 +13,11 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf # new work assert env['base'].run_crons() - repo = make_repo('repo') - project.branch_ids.sequence = 0 project.write({'branch_ids': [ + (1, project.branch_ids.id, {'sequence': 0}), (0, 0, {'name': 'other', 'sequence': 1}), (0, 0, {'name': 'other2', 'sequence': 2}), ]}) - repo_id = env['runbot_merge.repository'].create({ - 'project_id': project.id, - 'name': repo.name, - 'status_ids': [(0, 0, {'context': 'status'})], - 'group_id': False, - }) - setreviewers(*project.repo_ids) - env['runbot_merge.events_sources'].create({'repository': repo.name}) with repo: [m] = repo.make_commits(None, Commit('root', tree={'a': '1'}), ref='heads/master') @@ -32,14 +26,11 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf [c] = repo.make_commits(ot, Commit('wheee', tree={'b': '2'})) pr = repo.make_pr(title="title", body='body', target='other', head=c) - repo.post_status(c, 'success', 'status') + repo.post_status(c, 'success') pr.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() - pr_id = env['runbot_merge.pull_requests'].search([ - ('repository', '=', repo_id.id), - ('number', '=', pr.number), - ]) + pr_id = to_pr(env, pr) branch_id = pr_id.target assert pr_id.staging_id staging_id = branch_id.active_staging_id @@ -47,9 +38,6 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf # staging of `pr` should have generated a staging branch _ = repo.get_ref('heads/staging.other') - # stagings should not need a tmp branch anymore, so this should not exist - with pytest.raises(AssertionError, match=r'Not Found'): - repo.get_ref('heads/tmp.other') # disable branch "other" branch_id.active = False @@ -74,7 +62,7 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf [target] = p.cssselect('table tr.bg-info') assert 'inactive' in target.classes assert target[0].text_content() 
== "other" - + env.run_crons() assert pr.comments == [ (users['reviewer'], "hansen r+"), seen(env, pr, users), @@ -82,37 +70,38 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf ] with repo: - [c2] = repo.make_commits(ot, Commit('wheee', tree={'b': '3'})) - repo.update_ref(pr.ref, c2, force=True) + [c2] = repo.make_commits(ot, Commit('wheee', tree={'b': '3'}), ref=pr.ref, make=False) + env.run_crons() + assert pr.comments[3] == ( + users['user'], + "This PR targets the disabled branch {repository}:{target}, it needs to be retargeted before it can be merged.".format( + repository=repo.name, + target="other", + ) + ) assert pr_id.head == c2, "pr should be aware of its update" with repo: pr.base = 'other2' - repo.post_status(c2, 'success', 'status') + repo.post_status(c2, 'success') pr.post_comment('hansen rebase-ff r+', config['role_reviewer']['token']) env.run_crons() + assert pr.comments[4:] == [ + (users['reviewer'], 'hansen rebase-ff r+'), + (users['user'], "Merge method set to rebase and fast-forward."), + ] + assert pr_id.state == 'ready' assert pr_id.target == env['runbot_merge.branch'].search([('name', '=', 'other2')]) assert pr_id.staging_id # staging of `pr` should have generated a staging branch _ = repo.get_ref('heads/staging.other2') - # stagings should not need a tmp branch anymore, so this should not exist - with pytest.raises(AssertionError, match=r'Not Found'): - repo.get_ref('heads/tmp.other2') -def test_new_pr_no_branch(env, project, make_repo, setreviewers, users): +def test_new_pr_no_branch(env, project, repo, users): """ A new PR to an *unknown* branch should be ignored and warn """ - repo = make_repo('repo') - repo_id = env['runbot_merge.repository'].create({ - 'project_id': project.id, - 'name': repo.name, - 'status_ids': [(0, 0, {'context': 'status'})] - }) - setreviewers(*project.repo_ids) - env['runbot_merge.events_sources'].create({'repository': repo.name}) with repo: [m] = repo.make_commits(None, 
Commit('root', tree={'a': '1'}), ref='heads/master') @@ -123,30 +112,18 @@ def test_new_pr_no_branch(env, project, make_repo, setreviewers, users): env.run_crons() assert not env['runbot_merge.pull_requests'].search([ - ('repository', '=', repo_id.id), + ('repository', '=', project.repo_ids.id), ('number', '=', pr.number), ]), "the PR should not have been created in the backend" assert pr.comments == [ (users['user'], "This PR targets the un-managed branch %s:other, it needs to be retargeted before it can be merged." % repo.name), ] -def test_new_pr_disabled_branch(env, project, make_repo, setreviewers, users): +def test_new_pr_disabled_branch(env, project, repo, users): """ A new PR to a *disabled* branch should be accepted (rather than ignored) but should warn """ - repo = make_repo('repo') - repo_id = env['runbot_merge.repository'].create({ - 'project_id': project.id, - 'name': repo.name, - 'status_ids': [(0, 0, {'context': 'status'})] - }) - env['runbot_merge.branch'].create({ - 'project_id': project.id, - 'name': 'other', - 'active': False, - }) - setreviewers(*project.repo_ids) - env['runbot_merge.events_sources'].create({'repository': repo.name}) + project.write({'branch_ids': [(0, 0, {'name': 'other', 'active': False})]}) with repo: [m] = repo.make_commits(None, Commit('root', tree={'a': '1'}), ref='heads/master') @@ -156,13 +133,40 @@ def test_new_pr_disabled_branch(env, project, make_repo, setreviewers, users): pr = repo.make_pr(title="title", body='body', target='other', head=c) env.run_crons() - pr_id = env['runbot_merge.pull_requests'].search([ - ('repository', '=', repo_id.id), - ('number', '=', pr.number), - ]) + pr_id = to_pr(env, pr) assert pr_id, "the PR should have been created in the backend" assert pr_id.state == 'opened' assert pr.comments == [ (users['user'], "This PR targets the disabled branch %s:other, it needs to be retargeted before it can be merged." 
% repo.name), seen(env, pr, users), ] + +def test_review_disabled_branch(env, project, repo, users, config): + with repo: + [m] = repo.make_commits(None, Commit("init", tree={'m': 'm'}), ref='heads/master') + + [c] = repo.make_commits(m, Commit('pr', tree={'m': 'n'})) + pr = repo.make_pr(target="master", head=c) + env.run_crons() + target = project.branch_ids + target.active = False + env.run_crons() + with repo: + pr.post_comment("A normal comment", config['role_other']['token']) + with repo: + pr.post_comment("hansen r+", config['role_reviewer']['token']) + env.run_crons() + + assert pr.comments == [ + seen(env, pr, users), + (users['user'], "@{user} the target branch {target!r} has been disabled, you may want to close this PR.".format( + **users, + target=target.name, + )), + (users['other'], "A normal comment"), + (users['reviewer'], "hansen r+"), + (users['user'], "This PR targets the disabled branch {repository}:{target}, it needs to be retargeted before it can be merged.".format( + repository=repo.name, + target=target.name, + )), + ] From 26882c42aad3c61c2ea988414c30f08e86561ad9 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Fri, 27 Sep 2024 12:36:02 +0200 Subject: [PATCH 16/45] [FIX] warning in test logs --- conftest.py | 1 + 1 file changed, 1 insertion(+) diff --git a/conftest.py b/conftest.py index 539cf272..3226d824 100644 --- a/conftest.py +++ b/conftest.py @@ -477,6 +477,7 @@ class IrCron(models.Model): (mod / '__manifest__.py').write_text(pprint.pformat({ 'name': 'dummy saas_worker', 'version': '1.0', + 'license': 'BSD-0-Clause', }), encoding='utf-8') (mod / 'util.py').write_text("""\ def from_role(*_, **__): From ef22529620afbae3bfe4a398de4a2416a511a7b7 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Fri, 27 Sep 2024 12:36:50 +0200 Subject: [PATCH 17/45] [ADD] support for recursive tree to the test GH proxy --- conftest.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/conftest.py b/conftest.py index 
3226d824..7163552f 100644 --- a/conftest.py +++ b/conftest.py @@ -773,19 +773,20 @@ class Repo: parents=[p['sha'] for p in gh_commit['parents']], ) - def read_tree(self, commit): + def read_tree(self, commit: Commit, *, recursive=False) -> dict[str, str]: """ read tree object from commit - - :param Commit commit: - :rtype: Dict[str, str] """ - r = self._session.get('https://api.github.com/repos/{}/git/trees/{}'.format(self.name, commit.tree)) + r = self._session.get( + 'https://api.github.com/repos/{}/git/trees/{}'.format(self.name, commit.tree), + params={'recursive': '1'} if recursive else None, + ) assert 200 <= r.status_code < 300, r.text # read tree's blobs tree = {} for t in r.json()['tree']: - assert t['type'] == 'blob', "we're *not* doing recursive trees in test cases" + if t['type'] != 'blob': + continue # only keep blobs r = self._session.get('https://api.github.com/repos/{}/git/blobs/{}'.format(self.name, t['sha'])) assert 200 <= r.status_code < 300, r.text tree[t['path']] = base64.b64decode(r.json()['content']).decode() From 8f27773f8d158d53a7ac37197b48f173d0c5411c Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Fri, 27 Sep 2024 12:37:49 +0200 Subject: [PATCH 18/45] [IMP] forwardport: surfacing of modify/delete conflicts Given branch A, and branch B forked from it. If B removes a file which a PR to A later modifies, on forward port Git generates a modify/delete conflict (as in one side modifies a file which the other deleted). So far so good, except while it does notify the caller of the issue the modified file is just dumped as-is into the working copy (or commit), which essentially resurrects it. This is an issue, *especially* as the file is already part of a commit (rather than just a U local file), if the developer fixes the conflict "in place" rather than re-doing the forward-port from scratch it's easy to miss the reincarnated file (and possibly the changes that were part of it too), which at best leaves parasitic dead code in the working copy.
There is also no easy way for the runbot to check it as adding unimported standalone files while rare is not unknown e.g. utility scripts (to say nothing of JS where we can't really track the usages / imports at all). To resolve this issue, during conflict generation post-process modify/delete to insert artificial conflict markers, the file should be syntactically invalid so linters / checkers should complain, and the minimal check has a step looking for conflict markers which should fire and prevent merging the PR. Fixes #896 --- forwardport/models/project.py | 22 +++++- forwardport/tests/test_conflicts.py | 114 ++++++++++++++++++++++++---- runbot_merge/git.py | 44 ++++++++++- 3 files changed, 161 insertions(+), 19 deletions(-) diff --git a/forwardport/models/project.py b/forwardport/models/project.py index 0031b61b..f26ebfd5 100644 --- a/forwardport/models/project.py +++ b/forwardport/models/project.py @@ -346,7 +346,7 @@ class PullRequests(models.Model): '--merge-base', commits[0]['parents'][0]['sha'], target_branch.name, root.head, - ) + ).stdout.decode().splitlines(keepends=False)[0] # if there was a single commit, reuse its message when committing # the conflict if len(commits) == 1: @@ -362,10 +362,28 @@ stderr: {err} """ + # if a file is modified by the original PR and added by the forward + # port / conflict it's a modify/delete conflict: the file was + # deleted in the target branch, and the update (modify) in the + # original PR causes it to be added back + original_modified = set(conf.diff( + "--diff-filter=M", "--name-only", + "--merge-base", root.target.name, + root.head, + ).stdout.decode().splitlines(keepends=False)) + conflict_added = set(conf.diff( + "--diff-filter=A", "--name-only", + target_branch.name, + tree, + ).stdout.decode().splitlines(keepends=False)) + if modify_delete := (conflict_added & original_modified): + # rewrite the tree with conflict markers added to modify/deleted blobs + tree = conf.modify_delete(tree, modify_delete) + 
target_head = source.stdout().rev_parse(f"refs/heads/{target_branch.name}")\ .stdout.decode().strip() commit = conf.commit_tree( - tree=tree.stdout.decode().splitlines(keepends=False)[0], + tree=tree, parents=[target_head], message=str(msg), author=author, diff --git a/forwardport/tests/test_conflicts.py b/forwardport/tests/test_conflicts.py index 3c0bbda6..f6e23bbf 100644 --- a/forwardport/tests/test_conflicts.py +++ b/forwardport/tests/test_conflicts.py @@ -262,7 +262,7 @@ def test_massive_conflict(env, config, make_repo): def test_conflict_deleted(env, config, make_repo): - prod, other = make_basic(env, config, make_repo) + prod, other = make_basic(env, config, make_repo, statuses="default") # remove f from b with prod: prod.make_commits( @@ -277,18 +277,12 @@ def test_conflict_deleted(env, config, make_repo): ref='heads/conflicting' ) pr = prod.make_pr(target='a', head='conflicting') - prod.post_status(p_0, 'success', 'legal/cla') - prod.post_status(p_0, 'success', 'ci/runbot') + prod.post_status(p_0, 'success') pr.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() with prod: - prod.post_status('staging.a', 'success', 'legal/cla') - prod.post_status('staging.a', 'success', 'ci/runbot') - - env.run_crons() - # wait a bit for PR webhook... ? 
- time.sleep(5) + prod.post_status('staging.a', 'success') env.run_crons() # should have created a new PR @@ -300,9 +294,14 @@ def test_conflict_deleted(env, config, make_repo): 'g': 'c', } assert pr1.state == 'opened' - # NOTE: no actual conflict markers because pr1 essentially adds f de-novo assert prod.read_tree(prod.commit(pr1.head)) == { - 'f': 'xxx', + 'f': matches("""\ +<<<\x3c<<< $$ +||||||| $$ +======= +xxx +>>>\x3e>>> $$ +"""), 'g': 'c', } @@ -333,6 +332,89 @@ def test_conflict_deleted(env, config, make_repo): } assert pr1.state == 'opened', "state should be open still" + +def test_conflict_deleted_deep(env, config, make_repo): + """ Same as the previous one but files are deeper than toplevel, and we only + want to see if the conflict post-processing works. + """ + # region: setup + prod = make_repo("test") + env['runbot_merge.events_sources'].create({'repository': prod.name}) + with prod: + [a, b] = prod.make_commits( + None, + Commit("a", tree={ + "foo/bar/baz": "1", + "foo/bar/qux": "1", + "corge/grault": "1", + }), + Commit("b", tree={"foo/bar/qux": "2"}, reset=True), + ) + prod.make_ref("heads/a", a) + prod.make_ref("heads/b", b) + + project = env['runbot_merge.project'].create({ + 'name': "test", + 'github_token': config['github']['token'], + 'github_prefix': 'hansen', + 'fp_github_token': config['github']['token'], + 'fp_github_name': 'herbert', + 'branch_ids': [ + (0, 0, {'name': 'a', 'sequence': 100}), + (0, 0, {'name': 'b', 'sequence': 80}), + ], + "repo_ids": [ + (0, 0, { + 'name': prod.name, + 'required_statuses': "default", + 'fp_remote_target': prod.fork().name, + 'group_id': False, + }) + ] + }) + env['res.partner'].search([ + ('github_login', '=', config['role_reviewer']['user']) + ]).write({ + 'review_rights': [(0, 0, {'repository_id': project.repo_ids.id, 'review': True})] + }) + # endregion + + with prod: + prod.make_commits( + 'a', + Commit("c", tree={ + "foo/bar/baz": "2", + "corge/grault": "insert funny number", + }), + 
ref="heads/conflicting", + ) + pr = prod.make_pr(target='a', head='conflicting') + prod.post_status("conflicting", "success") + pr.post_comment("hansen r+", config['role_reviewer']['token']) + env.run_crons() + + with prod: + prod.post_status('staging.a', 'success') + env.run_crons() + _, pr1 = env['runbot_merge.pull_requests'].search([], order='number') + assert prod.read_tree(prod.commit(pr1.head), recursive=True) == { + "foo/bar/qux": "2", + "foo/bar/baz": """\ +<<<<<<< HEAD +||||||| MERGE BASE +======= +2 +>>>>>>> FORWARD PORTED +""", + "corge/grault": """\ +<<<<<<< HEAD +||||||| MERGE BASE +======= +insert funny number +>>>>>>> FORWARD PORTED +""" + }, "the conflicting file should have had conflict markers fixed in" + def test_multiple_commits_same_authorship(env, config, make_repo): """ When a PR has multiple commits by the same author and its forward-porting triggers a conflict, the resulting (squashed) conflict @@ -436,13 +518,13 @@ def test_multiple_commits_different_authorship(env, config, make_repo, users, ro "author set to the bot but an empty email" assert get(c.committer) == (bot, '') - assert re.match(r'''<<<\x3c<<< HEAD + assert prod.read_tree(c)['g'] == matches('''<<<\x3c<<< b b -|||||||| parent of [\da-f]{7,}.* +||||||| $$ ======= 2 ->>>\x3e>>> [\da-f]{7,}.* -''', prod.read_tree(c)['g']) +>>>\x3e>>> $$ +''') # I'd like to fix the conflict so everything is clean and proper *but* # github's API apparently rejects creating commits with an empty email. 
@@ -459,7 +541,7 @@ b assert pr2.comments == [ seen(env, pr2, users), - (users['user'], matches('@%s @%s $$CONFLICT' % (users['user'], users['reviewer']))), + (users['user'], matches('@%(user)s @%(reviewer)s $$CONFLICT' % users)), (users['reviewer'], 'hansen r+'), (users['user'], f"@{users['user']} @{users['reviewer']} unable to stage: " "All commits must have author and committer email, " diff --git a/runbot_merge/git.py b/runbot_merge/git.py index caaa6e5d..d9937329 100644 --- a/runbot_merge/git.py +++ b/runbot_merge/git.py @@ -6,7 +6,8 @@ import pathlib import resource import stat import subprocess -from typing import Optional, TypeVar, Union, Sequence, Tuple, Dict +from operator import methodcaller +from typing import Optional, TypeVar, Union, Sequence, Tuple, Dict, Iterable from odoo.tools.appdirs import user_cache_dir from .github import MergeError, PrCommit @@ -245,6 +246,47 @@ class Repo: *itertools.chain.from_iterable(('-p', p) for p in parents), ) + + def modify_delete(self, tree: str, files: Iterable[str]) -> str: + """Updates ``files`` in ``tree`` to add conflict markers to show them + as being modify/delete-ed, rather than have only the original content. + + This is because having just content in a valid file is easy to miss, + causing file resurrections as they get committed rather than re-removed. + + TODO: maybe extract the diff information compared to before they were removed? idk + """ + # FIXME: either ignore or process binary files somehow (how does git show conflicts in binary files?) 
+ repo = self.stdout().with_config(stderr=None, text=True, check=False, encoding="utf-8") + for f in files: + contents = repo.cat_file("-p", f"{tree}:{f}").stdout + # decorate the contents as if HEAD and BASE are empty + oid = repo\ + .with_config(input=f"""\ +<<<\x3c<<< HEAD +||||||| MERGE BASE +======= +{contents} +>>>\x3e>>> FORWARD PORTED +""")\ + .hash_object("-w", "--stdin", "--path", f)\ + .stdout.strip() + + # we need to rewrite every tree from the parent of `f` + while f: + f, _, local = f.rpartition("/") + # tree to update, `{tree}:` works as an alias for tree + lstree = repo.ls_tree(f"{tree}:{f}").stdout.splitlines(keepends=False) + new_tree = "".join( + # tab before name is critical to the format + f"{mode} {typ} {oid if name == local else sha}\t{name}\n" + for mode, typ, sha, name in map(methodcaller("split", None, 3), lstree) + ) + oid = repo.with_config(input=new_tree, check=True).mktree().stdout.strip() + tree = oid + return tree + + def check(p: subprocess.CompletedProcess) -> subprocess.CompletedProcess: if not p.returncode: return p From 430ccab2cbf1c57b351412f3b54030ad565bbef5 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Fri, 27 Sep 2024 12:53:51 +0200 Subject: [PATCH 19/45] [IMP] runbot_merge: suppress view validation warning This is a dumb false positive, kill it. --- runbot_merge/models/__init__.py | 1 + runbot_merge/models/ir_ui_view.py | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 runbot_merge/models/ir_ui_view.py diff --git a/runbot_merge/models/__init__.py b/runbot_merge/models/__init__.py index f15f98f2..c1cee75c 100644 --- a/runbot_merge/models/__init__.py +++ b/runbot_merge/models/__init__.py @@ -1,4 +1,5 @@ from . import ir_actions +from . import ir_ui_view from . import res_partner from . import project from . 
import pull_requests diff --git a/runbot_merge/models/ir_ui_view.py b/runbot_merge/models/ir_ui_view.py new file mode 100644 index 00000000..fa915348 --- /dev/null +++ b/runbot_merge/models/ir_ui_view.py @@ -0,0 +1,17 @@ +from odoo import models + + +class View(models.Model): + _inherit = 'ir.ui.view' + + def _log_view_warning(self, msg, node): + """The view validator is dumb and triggers a warning because there's a + `field.btn`, even though making a `field[widget=url]` (which renders as + a link) look like a button is perfectly legitimate. + + Suppress that warning. + """ + if node.tag == 'field' and node.get('widget') == 'url' and "button/submit/reset" in msg: + return + + super()._log_view_warning(msg, node) From aac987f2bbea3164e60a94bb2460c3a2e2f0ac31 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Fri, 27 Sep 2024 14:13:43 +0200 Subject: [PATCH 20/45] [FIX] runbot_merge: dashboard display nits - fix staging reasons containing escaped quotes (would render as ` ` to the end user) - remove extra spacing around PR title @title/popovers - simplify a few view conditionals through judicious use of `t-elif` and nesting - make `staging_end` non-computed as it's not computed anymore, just set if and when the staging gets disabled (146564a90a7c4ee6bc111380886e2e1ec1dabe7b) --- runbot_merge/models/pull_requests.py | 2 +- runbot_merge/views/templates.xml | 42 +++++++++++++--------------- 2 files changed, 21 insertions(+), 23 deletions(-) diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py index d04e2a72..810eb673 100644 --- a/runbot_merge/models/pull_requests.py +++ b/runbot_merge/models/pull_requests.py @@ -2048,7 +2048,7 @@ class Stagings(models.Model): active = fields.Boolean(default=True) staged_at = fields.Datetime(default=fields.Datetime.now, index=True) - staging_end = fields.Datetime(store=True, compute='_compute_state') + staging_end = fields.Datetime(store=True) staging_duration = fields.Float(compute='_compute_duration') 
timeout_limit = fields.Datetime(store=True, compute='_compute_timeout_limit') reason = fields.Text("Reason for final state (if any)") diff --git a/runbot_merge/views/templates.xml b/runbot_merge/views/templates.xml index 2dff930f..2ac80f83 100644 --- a/runbot_merge/views/templates.xml +++ b/runbot_merge/views/templates.xml @@ -5,21 +5,18 @@