[CHG] runbot_merge: make merge method non-blocking

Because of the false negatives due to github's reordering of events on
retargeting, blocking merge methods can be rather frustrating for the
user, as what's happening and how to solve it isn't clear in that case.

Keep the warnings and all, but remove the blocking factor: if a PR
doesn't have a merge method and is not single-commit, just skip it on
staging. This way, PRs which are actually single-commit will stage
fine even if the mergebot thinks they shouldn't be.

Fixes #957
This commit is contained in:
Xavier Morel 2024-10-07 08:07:59 +02:00
parent 4215be770d
commit 20a4e97b05
7 changed files with 131 additions and 19 deletions

View File

@ -2,7 +2,7 @@ import hashlib
import hmac
import logging
import json
from datetime import datetime
from datetime import datetime, timedelta
import sentry_sdk
import werkzeug.exceptions
@ -294,6 +294,12 @@ def handle_pr(env, event):
event['sender']['login'],
pr['commits'] == 1
)
if pr['base']['ref'] != pr_obj.target.name:
env['runbot_merge.fetch_job'].create({
'repository': pr_obj.repository.id,
'number': pr_obj.number,
'commits_at': datetime.now() + timedelta(minutes=5),
})
pr_obj.write({
'reviewed_by': False,

View File

@ -5,4 +5,7 @@ class FastForwardError(Exception):
class Mismatch(MergeError):
pass
class Unmergeable(MergeError):
...
pass
class Skip(MergeError):
pass

View File

@ -195,7 +195,7 @@ class Batch(models.Model):
@api.depends(
"merge_date",
"prs.error", "prs.draft", "prs.squash", "prs.merge_method",
"prs.error", "prs.draft",
"skipchecks",
"prs.status", "prs.reviewed_by", "prs.target",
)
@ -208,7 +208,7 @@ class Batch(models.Model):
elif len(targets := batch.prs.mapped('target')) > 1:
batch.blocked = f"Multiple target branches: {', '.join(targets.mapped('name'))!r}"
elif blocking := batch.prs.filtered(
lambda p: p.error or p.draft or not (p.squash or p.merge_method)
lambda p: p.error or p.draft
):
batch.blocked = "Pull request(s) %s blocked." % ', '.join(blocking.mapped('display_name'))
elif not batch.skipchecks and (unready := batch.prs.filtered(

View File

@ -1,6 +1,7 @@
from __future__ import annotations
import ast
import builtins
import collections
import contextlib
import datetime
@ -106,7 +107,7 @@ All substitutions are tentatively applied sequentially to the input.
self._cr, 'runbot_merge_unique_repo', self._table, ['name'])
return res
def _load_pr(self, number, *, closing=False):
def _load_pr(self, number, *, closing=False, squash=False):
gh = self.github()
# fetch PR object and handle as *opened*
@ -133,6 +134,10 @@ All substitutions are tentatively applied sequentially to the input.
('number', '=', number),
])
if pr_id:
if squash:
pr_id.squash = pr['commits'] == 1
return
sync = controllers.handle_pr(self.env, {
'action': 'synchronize',
'pull_request': pr,
@ -583,8 +588,6 @@ class PullRequests(models.Model):
@api.depends(
'batch_id.prs.draft',
'batch_id.prs.squash',
'batch_id.prs.merge_method',
'batch_id.prs.state',
'batch_id.skipchecks',
)
@ -592,12 +595,11 @@ class PullRequests(models.Model):
self.blocked = False
requirements = (
lambda p: not p.draft,
lambda p: p.squash or p.merge_method,
lambda p: p.state == 'ready' \
or p.batch_id.skipchecks \
and all(pr.state != 'error' for pr in p.batch_id.prs)
)
messages = ('is in draft', 'has no merge method', 'is not ready')
messages = ('is in draft', 'is not ready')
for pr in self:
if pr.state in ('merged', 'closed'):
continue
@ -834,6 +836,10 @@ For your own safety I've ignored *everything in your entire comment*.
pull_request=self.number,
format_args={'new_method': explanation, 'pr': self, 'user': login},
)
# if the merge method is the only thing preventing (but not
# *blocking*) staging, trigger a staging
if self.state == 'ready':
self.env.ref("runbot_merge.staging_cron")._trigger()
case commands.Retry() if is_author or source_author:
if self.error:
self.error = False
@ -2497,31 +2503,41 @@ class FetchJob(models.Model):
repository = fields.Many2one('runbot_merge.repository', required=True)
number = fields.Integer(required=True, group_operator=None)
closing = fields.Boolean(default=False)
commits_at = fields.Datetime(index="btree_not_null")
@api.model_create_multi
def create(self, vals_list):
self.env.ref('runbot_merge.fetch_prs_cron')._trigger()
now = fields.Datetime.now()
self.env.ref('runbot_merge.fetch_prs_cron')._trigger({
fields.Datetime.to_datetime(
vs.get('commits_at') or now
)
for vs in vals_list
})
return super().create(vals_list)
def _check(self, commit=False):
"""
:param bool commit: commit after each fetch has been executed
"""
now = getattr(builtins, 'current_date', None) or fields.Datetime.to_string(datetime.datetime.now())
while True:
f = self.search([], limit=1)
f = self.search([
'|', ('commits_at', '=', False), ('commits_at', '<=', now)
], limit=1)
if not f:
return
f.active = False
self.env.cr.execute("SAVEPOINT runbot_merge_before_fetch")
try:
f.repository._load_pr(f.number, closing=f.closing)
f.repository._load_pr(f.number, closing=f.closing, squash=bool(f.commits_at))
except Exception:
self.env.cr.execute("ROLLBACK TO SAVEPOINT runbot_merge_before_fetch")
_logger.exception("Failed to load pr %s, skipping it", f.number)
finally:
self.env.cr.execute("RELEASE SAVEPOINT runbot_merge_before_fetch")
f.active = False
if commit:
self.env.cr.commit()

View File

@ -262,6 +262,10 @@ def stage_batches(branch: Branch, batches: Batch, staging_state: StagingState) -
break
try:
staged |= stage_batch(env, batch, staging_state)
except exceptions.Skip:
# skip silently because the PR will be retried on every staging
# which is going to be ultra spammy
pass
except exceptions.MergeError as e:
pr = e.args[0]
_logger.info("Failed to stage %s into %s", pr.display_name, branch.name)
@ -419,9 +423,19 @@ def stage(pr: PullRequests, info: StagingSlice, related_prs: PullRequests) -> Tu
invalid['target'] = branch.id
diff.append(('Target branch', pr.target.name, branch.name))
if pr.squash != commits == 1:
invalid['squash'] = commits == 1
diff.append(('Single commit', pr.squash, commits == 1))
if not method:
if not pr.method_warned:
pr.env.ref('runbot_merge.pr.merge_method')._send(
repository=pr.repository,
pull_request=pr.number,
format_args={'pr': pr, 'methods': ''.join(
'* `%s` to %s\n' % pair
for pair in type(pr).merge_method.selection
if pair[0] != 'squash'
)},
)
pr.method_warned = True
raise exceptions.Skip()
msg = utils.make_message(prdict)
if pr.message != msg:

View File

@ -2211,7 +2211,7 @@ class TestPRUpdate:
def test_update_opened(self, env, repo):
with repo:
[c] = repo.make_commits("master", Commit('fist', tree={'m': 'c1'}))
[c] = repo.make_commits("master", Commit('first', tree={'m': 'c1'}))
prx = repo.make_pr(target='master', head=c)
pr = to_pr(env, prx)
@ -2595,6 +2595,77 @@ Please check and re-approve.
assert not pr_id.reviewed_by
assert not pr_id.squash
@pytest.mark.defaultstatuses
def test_update_incorrect_commits_count(self, port, env, project, repo, config, users):
"""This is not a great test but it aims to kinda sorta simulate the
behaviour when a user retargets and updates a PR at about the same time:
github can send the hooks in the wrong order, which leads to the correct
base and head but can lead to the wrong squash status.
"""
project.write({
'branch_ids': [(0, 0, {
'name': 'xxx',
})]
})
with repo:
[c] = repo.make_commits("master", Commit("c", tree={"m": "n"}), ref="heads/thing")
pr = repo.make_pr(target='master', head='thing')
pr_id = to_pr(env, pr)
pr_id.head = '0'*40
with requests.Session() as s:
r = s.post(
f"http://localhost:{port}/runbot_merge/hooks",
headers={
"X-Github-Event": "pull_request",
},
json={
'action': 'synchronize',
'sender': {
'login': users['user'],
},
'repository': {
'full_name': repo.name,
},
'pull_request': {
'number': pr.number,
'head': {'sha': c},
'title': "c",
'commits': 40123,
'base': {
'ref': 'xxx',
'repo': {
'full_name': repo.name,
},
}
}
}
)
r.raise_for_status()
assert pr_id.head == c, "the head should have been updated"
assert not pr_id.squash, "the wrong count should be used"
with repo:
pr.post_comment("hansen r+", config['role_reviewer']['token'])
repo.post_status(c, 'success')
env.run_crons()
assert not pr_id.blocked
assert pr_id.message_ids[::-1].mapped(lambda m: (
((m.subject or '') + '\n\n' + m.body).strip(),
list(map(read_tracking_value, m.tracking_value_ids)),
)) == [
('<p>Pull Request created</p>', []),
('', [('head', c, '0'*40)]),
('', [('head', '0'*40, c), ('squash', 1, 0)]),
('', [('state', 'Opened', 'Approved'), ('reviewed_by', '', 'Reviewer')]),
(f'<p>statuses changed on {c}</p>', [('state', 'Approved', 'Ready')]),
]
assert pr_id.staging_id
with repo:
repo.post_status('staging.master', 'success')
env.run_crons()
assert pr_id.merge_date
class TestBatching(object):
def _pr(self, repo, prefix, trees, *, target='master', user, reviewer,
statuses=(('ci/runbot', 'success'), ('legal/cla', 'success'))

View File

@ -806,10 +806,12 @@ class TestBlocked:
p = to_pr(env, pr)
assert p.state == 'ready'
assert p.blocked
assert not p.blocked
assert not p.staging_id
with repo_a: pr.post_comment('hansen rebase-merge', config['role_reviewer']['token'])
assert not p.blocked
env.run_crons()
assert p.staging_id
def test_linked_closed(self, env, repo_a, repo_b, config):
with repo_a, repo_b: