[FIX] *: double forwardport when adding a PR to an existing batch

This is a bit of an odd case which was only noticed because of
persistent forwardport.batches, which ended up generating a ton of
related tracebacks in the logs (the mergebot kept trying to create
forward ports from Jan 27th to Feb 10th; thankfully the errors
happened in git, so they did not seem to eat into our API rate limit).

The issue was triggered by the addition of odoo/enterprise#77876 to
odoo/odoo#194818. This queued a completion job which led to the
creation of odoo/enterprise#77877 through odoo/enterprise#77880; so
far so good.

Except the bit of code responsible for creating completion jobs only
checked whether the PR was being added to a batch with a descendant.
That is the case for odoo/enterprise#77877 through
odoo/enterprise#77879 (not odoo/enterprise#77880, since that's the
end of the line). As a result, those triggered 3 more completion
jobs, which kept failing in a loop because they tried pushing
different commits to their next siblings (without forcing, so git
rejected the non-fast-forward pushes, hurray).

A completion request should only be triggered by the addition of a
new *source* (a PR without a source) to an existing batch with
descendants, so add that to the check. This requires updating
`_from_gh` to create PRs in a single step (rather than one step to
create from github's data, and another one for the hierarchical
tracking), as we need the source to be set during `create` rather
than as a post-action.
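
For illustration, the new trigger condition boils down to the
following standalone sketch (the `PR` dataclass, `needs_completion`
helper, and `batch_has_descendants` flag are made-up stand-ins for
the actual ORM fields and batch search, not mergebot code; see the
diff below for the real check):

    # Simplified sketch of the completion trigger, not the actual ORM code:
    # a completion job should only be queued when the PR being created is a
    # *source* (it has no source itself) and its batch already has
    # descendant batches.
    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class PR:
        source_id: Optional[int]     # set on forward-ports, None on sources
        batch_has_descendants: bool  # stand-in for the batch search

    def needs_completion(pr: PR) -> bool:
        # the old check only looked at batch_has_descendants, which also
        # matched the forward-ports created by the completion itself,
        # hence the loop
        return pr.source_id is None and pr.batch_has_descendants

    assert needs_completion(PR(source_id=None, batch_has_descendants=True))
    assert not needs_completion(PR(source_id=42, batch_has_descendants=True))
    assert not needs_completion(PR(source_id=None, batch_has_descendants=False))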

Although there was a test which could have caught this issue, it only
had 3 branches, which was not long enough to trigger the problem:

- Initial PR 1 on branch A merged then forward-ported to B and C.
- Sibling PR 2 added to the batch in B.
- Completed to C.
- Ending there as C(1) has no descendant batch, leading to no further
  completion request.

Adding a 4th branch does surface the issue by providing room for a
new completion request from the creation of C(2). Interestingly, even
though the test harness attempts to run all triggered crons to
completion there can be misses, so the test can fail in two different
ways (both now checked for):

- there's a leftover forwardport.batch after we've created all our
  forwardports
- there's an extra PR targeting D, descending from C(2)
- in almost every case there's also a traceback in the logs, which
  does fail the build thanks to the `env` fixture's check

Author: Xavier Morel
Date:   2025-02-11 14:27:53 +01:00
parent  6d8f9d9fb0
commit  0dd11acce8

6 changed files with 75 additions and 69 deletions

@@ -241,17 +241,15 @@ class ForwardPortTasks(models.Model, Queue):
                 _logger.warning("Deleting %s:%s=%s", remote_target, ref, d.text)
             raise RuntimeError(f"Forwardport failure: {pr.display_name} ({r.text})")
 
-        new_pr = PullRequests._from_gh(r.json())
-        _logger.info("Created forward-port PR %s", new_pr)
-        new_pr.write({
-            'batch_id': descendant.id, # should already be set correctly but...
-            'merge_method': pr.merge_method,
-            'source_id': source.id,
-            # only link to previous PR of sequence if cherrypick passed
-            # FIXME: apply parenting of siblings? Apply parenting *to* siblings?
-            'parent_id': pr.id if not conflict else False,
-            'detach_reason': "{1}\n{2}".format(*conflict).strip() if conflict else None,
-        })
+        new_pr = PullRequests._from_gh(
+            r.json(),
+            batch_id=descendant.id,
+            merge_method=pr.merge_method,
+            source_id=source.id,
+            parent_id=False if conflict else pr.id,
+            detach_reason="{1}\n{2}".format(*conflict).strip() if conflict else None
+        )
+        _logger.info("Created forward-port PR %s", new_pr.display_name)
 
         if conflict:
             self.env.ref('runbot_merge.forwardport.failure.conflict')._send(

@@ -213,10 +213,11 @@ class PullRequests(models.Model):
         new = super().create(to_create)
 
         for pr in new:
-            # added a new PR to an already forward-ported batch: port the PR
-            if self.env['runbot_merge.batch'].search_count([
+            # added a new PR to an already forward-ported batch: immediately
+            # port forward to complete the genealogy
+            if not pr.source_id and self.env['runbot_merge.batch'].search_count([
                 ('parent_id', '=', pr.batch_id.id),
-            ]):
+            ], limit=1):
                 self.env['forwardport.batches'].create({
                     'batch_id': pr.batch_id.id,
                     'source': 'complete',

@@ -227,6 +227,13 @@ def test_add_to_forward_ported(env, config, make_repo, users):
     # region setup
     r1, _ = make_basic(env, config, make_repo, statuses="default")
     r2, fork2 = make_basic(env, config, make_repo, statuses="default")
+    project = env['runbot_merge.project'].search([])
+    project.write({
+        'branch_ids': [(0, 0, {'name': 'd', 'sequence': 40})],
+    })
+    with r1, r2:
+        r1.make_commits('c', Commit('1111', tree={'g': 'a'}), ref='heads/d')
+        r2.make_commits('c', Commit('1111', tree={'g': 'a'}), ref='heads/d')
 
     with r1:
         r1.make_commits('a', Commit('a', tree={'a': 'a'}), ref="heads/pr1")
@@ -241,59 +248,60 @@ def test_add_to_forward_ported(env, config, make_repo, users):
     # region port forward
     pr1_a_id = to_pr(env, pr1_a)
-    pr1_b_id = pr1_a_id.forwardport_ids
-    assert pr1_b_id
-    with r1:
-        r1.post_status(pr1_b_id.head, 'success')
-    env.run_crons()
-    pr1_c_id = pr1_a_id.forwardport_ids - pr1_b_id
-    assert pr1_c_id
+    for branch in 'bcd':
+        next_pr = env['runbot_merge.pull_requests'].search([
+            ('source_id', '=', pr1_a_id.id),
+            ('target.name', '=', branch),
+        ])
+        assert next_pr
+        with r1:
+            r1.post_status(next_pr.head, 'success')
+        env.run_crons()
     # endregion
     # endregion
 
+    pr1_b_id, pr1_c_id, pr1_d_id = pr1_a_id.forwardport_ids[::-1]
+
     # new PR must be in fork for labels to actually match
     with r2, fork2:
         # branch in fork has no owner prefix, but HEAD for cross-repo PR does
         fork2.make_commits(r2.commit("b").id, Commit('b', tree={'b': 'b'}), ref=f'heads/{pr1_b_id.refname}')
         pr2_b = r2.make_pr(title="b", target="b", head=pr1_b_id.label)
         r2.post_status(pr2_b.head, 'success')
     env.run_crons()
 
     pr2_b_id = to_pr(env, pr2_b)
     assert pr2_b_id.batch_id == pr1_b_id.batch_id
-    assert len(pr2_b_id.forwardport_ids) == 1, \
+    assert len(pr2_b_id.forwardport_ids) == 2, \
         "since the batch is already forward ported, the new PR should" \
         " immediately be forward ported to match"
-    assert pr2_b_id.forwardport_ids.label == pr1_c_id.label
-
-    pr2_a = r1.get_pr(pr1_b_id.number)
+    assert pr2_b_id.forwardport_ids.mapped('label') == (pr1_d_id | pr1_c_id).mapped('label')
+
+    assert not (b := env['forwardport.batches'].search([])),\
+        f"there should not be any more forward porting, found {b.read(['source', 'pr_id'])}"
+    known_set = pr1_a_id | pr1_a_id.forwardport_ids | pr2_b_id | pr2_b_id.forwardport_ids
+    assert not (p := (env['runbot_merge.pull_requests'].search([]) - known_set)), \
+        "there should not be any PR outside of the sequences we created, "\
+        f"found {p.read(['source_id', 'parent_id', 'display_name'])}"
+
+    with r2:
+        for pr_id in pr2_b_id | pr2_b_id.forwardport_ids:
+            r2.post_status(pr_id.head, 'success')
     with r1, r2:
-        pr2_a.post_comment('hansen r+', config['role_reviewer']['token'])
         pr2_b.post_comment("hansen r+", config['role_reviewer']['token'])
+        r1.get_pr(pr1_a_id.forwardport_ids[0].number)\
+            .post_comment('hansen r+', config['role_reviewer']['token'])
+        r2.get_pr(pr2_b_id.forwardport_ids[0].number) \
+            .post_comment('hansen r+', config['role_reviewer']['token'])
     env.run_crons()
 
     with r1, r2:
-        r1.post_status('staging.b', 'success')
-        r2.post_status('staging.b', 'success')
+        for branch in 'bcd':
+            r1.post_status(f'staging.{branch}', 'success')
+            r2.post_status(f'staging.{branch}', 'success')
     env.run_crons()
 
     assert pr1_b_id.state == 'merged'
     assert pr2_b_id.state == 'merged'
 
-    assert len(pr2_b_id.forwardport_ids) == 1,\
-        "verify that pr2_b did not get forward ported again on merge"
-    pr2_c = r2.get_pr(pr2_b_id.forwardport_ids.number)
-    assert pr2_c.comments == [
-        seen(env, pr2_c, users),
-        (users['user'], '''\
-@{user} this PR targets c and is the last of the forward-port chain.
-
-To merge the full chain, use
-> @hansen r+
-
-More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
-'''.format_map(users)),
-    ]
+    assert not env['runbot_merge.pull_requests'].search([]) - known_set, \
+        "there should not be any PR outside of the sequences we created"
 
 
 def test_add_to_forward_port_conflict(env, config, make_repo, users):
     """If a PR is added to an existing forward port sequence, and it causes

@@ -116,15 +116,15 @@ class PullRequestBackport(models.TransientModel):
         if not r.ok:
             raise UserError(f"Backport PR creation failure: {r.text}")
 
-        backport = self.env['runbot_merge.pull_requests']._from_gh(r.json())
-        _logger.info("Created backport %s for %s", backport.display_name, self.pr_id.display_name)
-        backport.write({
-            'merge_method': self.pr_id.merge_method,
+        backport = self.env['runbot_merge.pull_requests']._from_gh(
+            r.json(),
+            merge_method=self.pr_id.merge_method,
             # the backport's own forwardport should stop right before the
             # original PR by default
-            'limit_id': branches[source_idx - 1],
-        })
+            limit_id=branches[source_idx - 1],
+        )
+        _logger.info("Created backport %s for %s", backport.display_name, self.pr_id.display_name)
 
         self.env['runbot_merge.pull_requests.tagging'].create({
             'repository': repo_id.id,
             'pull_request': backport.number,

@@ -364,23 +364,21 @@ class Batch(models.Model):
                     _logger.warning("Deleting %s:%s=%s", repo.fp_remote_target, new_branch, d.text)
                 raise RuntimeError(f"Forwardport failure: {pr.display_name} ({r.text})")
 
-            new_pr = PRs._from_gh(r.json())
+            report_conflicts = has_conflicts and self.fw_policy != 'skipmerge'
+            new_pr = PRs._from_gh(
+                r.json(),
+                merge_method=pr.merge_method,
+                source_id=source.id,
+                parent_id=False if report_conflicts else pr.id,
+                detach_reason=report_conflicts and "conflicts:\n{}".format(
+                    '\n\n'.join(
+                        f"{out}\n{err}".strip()
+                        for _, out, err, _ in filter(None, conflicts.values())
+                    )
+                ),
+            )
             _logger.info("Created forward-port PR %s", new_pr)
             new_batch |= new_pr
-
-            report_conflicts = has_conflicts and self.fw_policy != 'skipmerge'
-            # allows PR author to close or skipci
-            new_pr.write({
-                'merge_method': pr.merge_method,
-                'source_id': source.id,
-                # only link to previous PR of sequence if cherrypick passed
-                'parent_id': pr.id if not report_conflicts else False,
-                'detach_reason': "conflicts:\n{}".format('\n\n'.join(
-                    f"{out}\n{err}".strip()
-                    for _, out, err, _ in filter(None, conflicts.values())
-                )) if report_conflicts else None,
-            })
-
             if report_conflicts and pr.parent_id and pr.state not in ('merged', 'closed'):
                 self.env.ref('runbot_merge.forwardport.failure.conflict')._send(
                     repository=pr.repository,

@@ -1397,7 +1397,7 @@ For your own safety I've ignored *everything in your entire comment*.
         )
         return prs
 
-    def _from_gh(self, description, author=None, branch=None, repo=None):
+    def _from_gh(self, description, author=None, branch=None, repo=None, **kwargs):
         if repo is None:
             repo = self.env['runbot_merge.repository'].search([
                 ('name', '=', description['base']['repo']['full_name']),
@@ -1423,6 +1423,7 @@ For your own safety I've ignored *everything in your entire comment*.
             'squash': description['commits'] == 1,
             'message': utils.make_message(description),
             'draft': description['draft'],
+            **kwargs,
         })
 
     def write(self, vals):