diff --git a/forwardport/models/forwardport.py b/forwardport/models/forwardport.py
index ee3d9b25..00dac319 100644
--- a/forwardport/models/forwardport.py
+++ b/forwardport/models/forwardport.py
@@ -82,25 +82,28 @@ class ForwardPortTasks(models.Model, Queue):
def _process_item(self):
batch = self.batch_id
sentry_sdk.set_tag('forward-porting', batch.prs.mapped('display_name'))
- newbatch = batch.prs._port_forward()
+ newbatch = batch._port_forward()
if not newbatch: # reached end of seq (or batch is empty)
# FIXME: or configuration is fucky so doesn't want to FP (maybe should error and retry?)
_logger.info(
- "Processed %s (from %s): %s (%s) -> end of the sequence",
- self.id, self.source,
- batch, batch.prs
+ "Processed %s from %s (%s) -> end of the sequence",
+ batch, self.source, ', '.join(batch.prs.mapped('display_name')),
)
return
_logger.info(
- "Processed %s (from %s): %s (%s) -> %s (%s)",
- self.id, self.source,
- batch, batch.prs,
- newbatch, newbatch.prs,
+ "Processed %s from %s (%s) -> %s (%s)",
+ batch, self.source, ', '.join(batch.prs.mapped('display_name')),
+ newbatch, ', '.join(newbatch.prs.mapped('display_name')),
)
- # insert new batch in ancestry sequence unless conflict (= no parent)
+ # insert new batch in ancestry sequence
if self.source == 'insert':
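+ # when a new branch is inserted in the middle of the sequence, any
+ # batch previously forward-ported from `batch` must be re-parented
+ # onto the newly created intermediate batch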
+ self.env['runbot_merge.batch'].search([
+ ('parent_id', '=', batch.id),
+ ('id', '!=', newbatch.id),
+ ]).parent_id = newbatch.id
+ # insert new PRs in ancestry sequence unless conflict (= no parent)
for pr in newbatch.prs:
if not pr.parent_id:
break
diff --git a/forwardport/models/project.py b/forwardport/models/project.py
index d4a30f21..137e69ea 100644
--- a/forwardport/models/project.py
+++ b/forwardport/models/project.py
@@ -11,31 +11,22 @@ means PR creation is trickier (as mergebot assumes opened event will always
lead to PR creation but fpbot wants to attach meaning to the PR when setting
it up), ...
"""
-import ast
-import base64
-import contextlib
import datetime
import itertools
import json
import logging
import operator
-import os
-import re
import subprocess
import tempfile
import typing
-from operator import itemgetter
from pathlib import Path
import dateutil.relativedelta
-import psycopg2.errors
import requests
from odoo import models, fields, api
-from odoo.osv import expression
from odoo.exceptions import UserError
-from odoo.tools.misc import topological_sort, groupby, Reverse
-from odoo.tools.sql import reverse_order
+from odoo.tools.misc import topological_sort, groupby
from odoo.tools.appdirs import user_cache_dir
from odoo.addons.base.models.res_partner import Partner
from odoo.addons.runbot_merge import git
@@ -43,8 +34,6 @@ from odoo.addons.runbot_merge.models.pull_requests import Branch
from odoo.addons.runbot_merge.models.stagings_create import Message
-footer = '\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n'
-
DEFAULT_DELTA = dateutil.relativedelta.relativedelta(days=3)
_logger = logging.getLogger('odoo.addons.forwardport')
@@ -72,7 +61,7 @@ class Project(models.Model):
because no CI or CI failed), create followup, as if the branch had been
originally disabled (and thus skipped over)
"""
- PRs = self.env['runbot_merge.pull_requests']
+ Batch = self.env['runbot_merge.batch']
for p in self:
actives = previously_active_branches[p]
for deactivated in p.branch_ids.filtered(lambda b: not b.active) & actives:
@@ -80,26 +69,27 @@ class Project(models.Model):
# and it doesn't have a child (e.g. CI failed), enqueue a forward
# port as if the now deactivated branch had been skipped over (which
# is the normal fw behaviour)
- extant = PRs.search([
+ extant = Batch.search([
('target', '=', deactivated.id),
- ('source_id.limit_id', '!=', deactivated.id),
- ('state', 'not in', ('closed', 'merged')),
+ # FIXME: this should probably be *all* the PRs of the batch
+ ('prs.limit_id', '!=', deactivated.id),
+ ('merge_date', '=', False),
])
- for p in extant.with_context(force_fw=True):
- next_target = p.source_id._find_next_target(p)
+ for b in extant.with_context(force_fw=True):
+ next_target = b._find_next_target()
# should not happen since we already filtered out limits
if not next_target:
continue
# check if it has a descendant in the next branch, if so skip
- if PRs.search_count([
- ('source_id', '=', p.source_id.id),
+ if Batch.search_count([
+ ('parent_id', '=', b.id),
('target', '=', next_target.id)
]):
continue
# otherwise enqueue a followup
- p._schedule_fp_followup()
+ b._schedule_fp_followup()
def _insert_intermediate_prs(self, branches_before):
"""If new branches have been added to the sequence inbetween existing
@@ -161,13 +151,6 @@ class Project(models.Model):
'source': 'insert',
})
- def _forward_port_ordered(self, domain=()):
- Branches = self.env['runbot_merge.branch']
- return Branches.search(expression.AND([
- [('project_id', '=', self.id)],
- domain or [],
- ]), order=reverse_order(Branches._order))
-
class Repository(models.Model):
_inherit = 'runbot_merge.repository'
@@ -201,12 +184,6 @@ class PullRequests(models.Model):
if existing:
return existing
- if 'limit_id' not in vals:
- branch = self.env['runbot_merge.branch'].browse(vals['target'])
- repo = self.env['runbot_merge.repository'].browse(vals['repository'])
- vals['limit_id'] = branch.project_id._forward_port_ordered(
- ast.literal_eval(repo.branch_filter or '[]')
- )[-1].id
if vals.get('parent_id') and 'source_id' not in vals:
vals['source_id'] = self.browse(vals['parent_id']).root_id.id
return super().create(vals)
@@ -273,96 +250,6 @@ class PullRequests(models.Model):
})
return r
- def _maybe_update_limit(self, limit: str) -> typing.Tuple[bool, str]:
- limit_id = self.env['runbot_merge.branch'].with_context(active_test=False).search([
- ('project_id', '=', self.repository.project_id.id),
- ('name', '=', limit),
- ])
- if not limit_id:
- return True, f"there is no branch {limit!r}, it can't be used as a forward port target."
-
- if limit_id != self.target and not limit_id.active:
- return True, f"branch {limit_id.name!r} is disabled, it can't be used as a forward port target."
-
- # not forward ported yet, just acknowledge the request
- if not self.source_id and self.state != 'merged':
- self.limit_id = limit_id
- if branch_key(limit_id) <= branch_key(self.target):
- return False, "Forward-port disabled."
- else:
- return False, f"Forward-porting to {limit_id.name!r}."
-
- # if the PR has been forwardported
- prs = (self | self.forwardport_ids | self.source_id | self.source_id.forwardport_ids)
- tip = max(prs, key=pr_key)
- # if the fp tip was closed it's fine
- if tip.state == 'closed':
- return True, f"{tip.display_name} is closed, no forward porting is going on"
-
- prs.limit_id = limit_id
-
- real_limit = max(limit_id, tip.target, key=branch_key)
-
- addendum = ''
- # check if tip was queued for forward porting, try to cancel if we're
- # supposed to stop here
- if real_limit == tip.target and (task := self.env['forwardport.batches'].search([('batch_id', '=', tip.batch_id.id)])):
- try:
- with self.env.cr.savepoint():
- self.env.cr.execute(
- "SELECT FROM forwardport_batches "
- "WHERE id = %s FOR UPDATE NOWAIT",
- [task.id])
- except psycopg2.errors.LockNotAvailable:
- # row locked = port occurring and probably going to succeed,
- # so next(real_limit) likely a done deal already
- return True, (
- f"Forward port of {tip.display_name} likely already "
- f"ongoing, unable to cancel, close next forward port "
- f"when it completes.")
- else:
- self.env.cr.execute("DELETE FROM forwardport_batches WHERE id = %s", [task.id])
-
- if real_limit != tip.target:
- # forward porting was previously stopped at tip, and we want it to
- # resume
- if tip.state == 'merged':
- self.env['forwardport.batches'].create({
- 'batch_id': tip.batch_id.id,
- 'source': 'fp' if tip.parent_id else 'merge',
- })
- resumed = tip
- else:
- resumed = tip._schedule_fp_followup()
- if resumed:
- addendum += f', resuming forward-port stopped at {tip.display_name}'
-
- if real_limit != limit_id:
- addendum += f' (instead of the requested {limit_id.name!r} because {tip.display_name} already exists)'
-
- # get a "stable" root rather than self's to avoid divertences between
- # PRs across a root divide (where one post-root would point to the root,
- # and one pre-root would point to the source, or a previous root)
- root = tip.root_id
- # reference the root being forward ported unless we are the root
- root_ref = '' if root == self else f' {root.display_name}'
- msg = f"Forward-porting{root_ref} to {real_limit.name!r}{addendum}."
- # send a message to the source & root except for self, if they exist
- root_msg = f'Forward-porting to {real_limit.name!r} (from {self.display_name}).'
- self.env['runbot_merge.pull_requests.feedback'].create([
- {
- 'repository': p.repository.id,
- 'pull_request': p.number,
- 'message': root_msg,
- 'token_field': 'fp_github_token',
- }
- # send messages to source and root unless root is self (as it
- # already gets the normal message)
- for p in (self.source_id | root) - self
- ])
-
- return False, msg
-
def _notify_ci_failed(self, ci):
# only care about FP PRs which are not staged / merged yet
# NB: probably ignore approved PRs as normal message will handle them?
@@ -378,89 +265,9 @@ class PullRequests(models.Model):
def _validate(self, statuses):
failed = super()._validate(statuses)
- self._schedule_fp_followup()
+ self.batch_id._schedule_fp_followup()
return failed
- def _schedule_fp_followup(self):
- _logger = logging.getLogger(__name__).getChild('forwardport.next')
- # if the PR has a parent and is CI-validated, enqueue the next PR
- scheduled = self.browse(())
- for pr in self:
- _logger.info('Checking if forward-port %s (%s)', pr.display_name, pr)
- if not pr.parent_id:
- _logger.info('-> no parent %s (%s)', pr.display_name, pr.parent_id)
- continue
- if not self.env.context.get('force_fw') and self.source_id.batch_id.fw_policy != 'skipci' and pr.state not in ['validated', 'ready']:
- _logger.info('-> wrong state %s (%s)', pr.display_name, pr.state)
- continue
-
- # check if we've already forward-ported this branch
- source = pr.source_id or pr
- next_target = source._find_next_target(pr)
- if not next_target:
- _logger.info("-> forward port done (no next target)")
- continue
-
- if n := self.search([
- ('source_id', '=', source.id),
- ('target', '=', next_target.id),
- ], limit=1):
- _logger.info('-> already forward-ported (%s)', n.display_name)
- continue
-
- batch = pr.batch_id
- # otherwise check if we already have a pending forward port
- _logger.info("%s %s %s", pr.display_name, batch, ', '.join(batch.mapped('prs.display_name')))
- if self.env['forwardport.batches'].search_count([('batch_id', '=', batch.id)]):
- _logger.warning('-> already recorded')
- continue
-
- # check if batch-mate are all valid
- mates = batch.prs
- # wait until all of them are validated or ready
- if not self.env.context.get('force_fw') and any(pr.source_id.batch_id.fw_policy != 'skipci' and pr.state not in ('validated', 'ready') for pr in mates):
- _logger.info("-> not ready (%s)", [(pr.display_name, pr.state) for pr in mates])
- continue
-
- # check that there's no weird-ass state
- if not all(pr.parent_id for pr in mates):
- _logger.warning("Found a batch (%s) with only some PRs having parents, ignoring", mates)
- continue
- if self.search_count([('parent_id', 'in', mates.ids)]):
- _logger.warning("Found a batch (%s) with only some of the PRs having children", mates)
- continue
-
- _logger.info('-> ok')
- self.env['forwardport.batches'].create({
- 'batch_id': batch.id,
- 'source': 'fp',
- })
- scheduled |= pr
- return scheduled
-
- def _find_next_target(self, reference):
- """ Finds the branch between target and limit_id which follows
- reference
- """
- if reference.target == self.limit_id:
- return
- # NOTE: assumes even disabled branches are properly sequenced, would
- # probably be a good idea to have the FP view show all branches
- branches = list(self.target.project_id
- .with_context(active_test=False)
- ._forward_port_ordered(ast.literal_eval(self.repository.branch_filter or '[]')))
-
- # get all branches between max(root.target, ref.target) (excluded) and limit (included)
- from_ = max(branches.index(self.target), branches.index(reference.target))
- to_ = branches.index(self.limit_id)
-
- # return the first active branch in the set
- return next((
- branch
- for branch in branches[from_+1:to_+1]
- if branch.active
- ), None)
-
def _commits_lazy(self):
s = requests.Session()
s.headers['Authorization'] = 'token %s' % self.repository.project_id.fp_github_token
@@ -490,216 +297,6 @@ class PullRequests(models.Model):
return sorted(commits, key=lambda c: idx[c['sha']])
- def _port_forward(self):
- if not self:
- return
-
- all_sources = [(p.source_id or p) for p in self]
- all_targets = [s._find_next_target(p) for s, p in zip(all_sources, self)]
-
- ref = self[0]
- base = all_sources[0]
- target = all_targets[0]
- if target is None:
- _logger.info(
- "Will not forward-port %s: no next target",
- ref.display_name,
- )
- return # QUESTION: do the prs need to be updated?
-
- # check if the PRs have already been forward-ported: is there a PR
- # with the same source targeting the next branch in the series
- for source in all_sources:
- if self.search_count([('source_id', '=', source.id), ('target', '=', target.id)]):
- _logger.info("Will not forward-port %s: already ported", ref.display_name)
- return
-
- # check if all PRs in the batch have the same "next target" , bail if
- # that's not the case as it doesn't make sense for forward one PR from
- # a to b and a linked pr from a to c
- different_target = next((t for t in all_targets if t != target), None)
- if different_target:
- different_pr = next(p for p, t in zip(self, all_targets) if t == different_target)
- for pr, t in zip(self, all_targets):
- linked, other = different_pr, different_target
- if t != target:
- linked, other = ref, target
- self.env.ref('runbot_merge.forwardport.failure.discrepancy')._send(
- repository=pr.repository,
- pull_request=pr.number,
- token_field='fp_github_token',
- format_args={'pr': pr, 'linked': linked, 'next': t.name, 'other': other.name},
- )
- _logger.warning(
- "Cancelling forward-port of %s: found different next branches (%s)",
- self, all_targets
- )
- return
-
- proj = self.mapped('target.project_id')
- if not proj.fp_github_token:
- _logger.warning(
- "Can not forward-port %s: no token on project %s",
- ref.display_name,
- proj.name
- )
- return
-
- notarget = [p.repository.name for p in self if not p.repository.fp_remote_target]
- if notarget:
- _logger.error(
- "Can not forward-port %s: repos %s don't have a remote configured",
- self, ', '.join(notarget)
- )
- return
-
- # take only the branch bit
- new_branch = '%s-%s-%s-fw' % (
- target.name,
- base.refname,
- # avoid collisions between fp branches (labels can be reused
- # or conflict especially as we're chopping off the owner)
- base64.urlsafe_b64encode(os.urandom(3)).decode()
- )
- # TODO: send outputs to logging?
- conflicts = {}
- with contextlib.ExitStack() as s:
- for pr in self:
- conflicts[pr], working_copy = pr._create_fp_branch(
- target, new_branch, s)
-
- working_copy.push('target', new_branch)
-
- gh = requests.Session()
- gh.headers['Authorization'] = 'token %s' % proj.fp_github_token
- has_conflicts = any(conflicts.values())
- # problemo: this should forward port a batch at a time, if porting
- # one of the PRs in the batch fails is huge problem, though this loop
- # only concerns itself with the creation of the followup objects so...
- new_batch = self.browse(())
- self.env.cr.execute('LOCK runbot_merge_pull_requests IN SHARE MODE')
- for pr in self:
- owner, _ = pr.repository.fp_remote_target.split('/', 1)
- source = pr.source_id or pr
- root = pr.root_id
-
- message = source.message + '\n\n' + '\n'.join(
- "Forward-Port-Of: %s" % p.display_name
- for p in root | source
- )
-
- title, body = re.match(r'(?P<title>[^\n]+)\n*(?P<body>.*)', message, flags=re.DOTALL).groups()
- r = gh.post(f'https://api.github.com/repos/{pr.repository.name}/pulls', json={
- 'base': target.name,
- 'head': f'{owner}:{new_branch}',
- 'title': '[FW]' + (' ' if title[0] != '[' else '') + title,
- 'body': body
- })
- if not r.ok:
- _logger.warning("Failed to create forward-port PR for %s, deleting branches", pr.display_name)
- # delete all the branches this should automatically close the
- # PRs if we've created any. Using the API here is probably
- # simpler than going through the working copies
- for repo in self.mapped('repository'):
- d = gh.delete(f'https://api.github.com/repos/{repo.fp_remote_target}/git/refs/heads/{new_branch}')
- if d.ok:
- _logger.info("Deleting %s:%s=success", repo.fp_remote_target, new_branch)
- else:
- _logger.warning("Deleting %s:%s=%s", repo.fp_remote_target, new_branch, d.text)
- raise RuntimeError("Forwardport failure: %s (%s)" % (pr.display_name, r.text))
-
- new_pr = self._from_gh(r.json())
- _logger.info("Created forward-port PR %s", new_pr)
- new_batch |= new_pr
-
- # allows PR author to close or skipci
- new_pr.write({
- 'merge_method': pr.merge_method,
- 'source_id': source.id,
- # only link to previous PR of sequence if cherrypick passed
- 'parent_id': pr.id if not has_conflicts else False,
- 'detach_reason': "conflicts: {}".format(
- f'\n{conflicts[pr]}\n{conflicts[pr]}'.strip()
- ) if has_conflicts else None,
- })
- if has_conflicts and pr.parent_id and pr.state not in ('merged', 'closed'):
- self.env.ref('runbot_merge.forwardport.failure.conflict')._send(
- repository=pr.repository,
- pull_request=pr.number,
- token_field='fp_github_token',
- format_args={'source': source, 'pr': pr, 'new': new_pr, 'footer': footer},
- )
-
- for pr, new_pr in zip(self, new_batch):
- (h, out, err, hh) = conflicts.get(pr) or (None, None, None, None)
-
- if h:
- sout = serr = ''
- if out.strip():
- sout = f"\nstdout:\n```\n{out}\n```\n"
- if err.strip():
- serr = f"\nstderr:\n```\n{err}\n```\n"
-
- lines = ''
- if len(hh) > 1:
- lines = '\n' + ''.join(
- '* %s%s\n' % (sha, ' <- on this commit' if sha == h else '')
- for sha in hh
- )
- template = 'runbot_merge.forwardport.failure'
- format_args = {
- 'pr': new_pr,
- 'commits': lines,
- 'stdout': sout,
- 'stderr': serr,
- 'footer': footer,
- }
- elif has_conflicts:
- template = 'runbot_merge.forwardport.linked'
- format_args = {
- 'pr': new_pr,
- 'siblings': ', '.join(p.display_name for p in (new_batch - new_pr)),
- 'footer': footer,
- }
- elif base._find_next_target(new_pr) is None:
- ancestors = "".join(
- "* %s\n" % p.display_name
- for p in pr._iter_ancestors()
- if p.parent_id
- )
- template = 'runbot_merge.forwardport.final'
- format_args = {
- 'pr': new_pr,
- 'containing': ' containing:' if ancestors else '.',
- 'ancestors': ancestors,
- 'footer': footer,
- }
- else:
- template = 'runbot_merge.forwardport.intermediate'
- format_args = {
- 'pr': new_pr,
- 'footer': footer,
- }
- self.env.ref(template)._send(
- repository=new_pr.repository,
- pull_request=new_pr.number,
- token_field='fp_github_token',
- format_args=format_args,
- )
-
- labels = ['forwardport']
- if has_conflicts:
- labels.append('conflict')
- self.env['runbot_merge.pull_requests.tagging'].create({
- 'repository': new_pr.repository.id,
- 'pull_request': new_pr.number,
- 'tags_add': labels,
- })
-
- # try to schedule followup
- new_batch[0]._schedule_fp_followup()
- return new_batch.batch_id
-
def _create_fp_branch(self, target_branch, fp_branch_name, cleanup):
""" Creates a forward-port for the current PR to ``target_branch`` under
``fp_branch_name``.
@@ -969,44 +566,6 @@ stderr:
}
)
-class Batch(models.Model):
- _inherit = 'runbot_merge.batch'
-
- def write(self, vals):
- if vals.get('merge_date'):
- self.env['forwardport.branch_remover'].create([
- {'pr_id': p.id}
- for b in self
- if not b.merge_date
- for p in b.prs
- ])
-
- super().write(vals)
-
- # if we change the policy to skip CI, schedule followups on existing FPs
- # FIXME: although this should be managed by the command handler already,
- # this should probably allow setting the fw policy on any batch
- # of the sequence
- if vals.get('fw_policy') == 'skipci' and self.merge_date:
- self.env['runbot_merge.pull_requests'].search([
- ('source_id', '=', self.prs[:1].id),
- ('state', 'not in', ('closed', 'merged')),
- ])._schedule_fp_followup()
-
- return True
-
-# ordering is a bit unintuitive because the lowest sequence (and name)
-# is the last link of the fp chain, reasoning is a bit more natural the
-# other way around (highest object is the last), especially with Python
-# not really having lazy sorts in the stdlib
-def branch_key(b: Branch, /, _key=itemgetter('sequence', 'name')):
- return Reverse(_key(b))
-
-
-def pr_key(p: PullRequests, /):
- return branch_key(p.target)
-
-
class Stagings(models.Model):
_inherit = 'runbot_merge.stagings'
@@ -1014,7 +573,7 @@ class Stagings(models.Model):
r = super().write(vals)
# we've just deactivated a successful staging (so it got ~merged)
if vals.get('active') is False and self.state == 'success':
- # check al batches to see if they should be forward ported
+ # check all batches to see if they should be forward ported
for b in self.with_context(active_test=False).batch_ids:
# if all PRs of a batch have parents they're part of an FP
# sequence and thus handled separately, otherwise they're
diff --git a/forwardport/tests/test_limit.py b/forwardport/tests/test_limit.py
index 5d7d2313..071d2a54 100644
--- a/forwardport/tests/test_limit.py
+++ b/forwardport/tests/test_limit.py
@@ -136,7 +136,7 @@ def test_limit_after_merge(env, config, make_repo, users):
env.run_crons()
p1, p2 = env['runbot_merge.pull_requests'].search([], order='number')
- assert p1.limit_id == p2.limit_id == branch_c, "check that limit is correctly set"
+ assert p1.limit_id == p2.limit_id == env['runbot_merge.branch'].browse(())
pr2 = prod.get_pr(p2.number)
with prod:
pr1.post_comment('hansen up to b', reviewer)
diff --git a/forwardport/tests/test_overrides.py b/forwardport/tests/test_overrides.py
index eb5acf12..e41d2409 100644
--- a/forwardport/tests/test_overrides.py
+++ b/forwardport/tests/test_overrides.py
@@ -27,7 +27,7 @@ def test_override_inherited(env, config, make_repo, users):
original = env['runbot_merge.pull_requests'].search([('repository.name', '=', repo.name), ('number', '=', pr.number)])
assert original.state == 'ready'
- assert original.limit_id.name == 'c'
+ assert not original.limit_id
with repo:
repo.post_status('staging.a', 'success')
diff --git a/forwardport/tests/test_simple.py b/forwardport/tests/test_simple.py
index cc8bb3c1..c086bd9b 100644
--- a/forwardport/tests/test_simple.py
+++ b/forwardport/tests/test_simple.py
@@ -1052,9 +1052,6 @@ class TestRecognizeCommands:
[a] = env['runbot_merge.branch'].search([
('name', '=', 'a')
])
- [c] = env['runbot_merge.branch'].search([
- ('name', '=', 'c')
- ])
names = [
"hansen",
@@ -1064,12 +1061,12 @@ class TestRecognizeCommands:
]
for n in names:
- assert pr_id.limit_id == c
+ assert not pr_id.limit_id
with repo:
pr.post_comment(f'@{n} up to a', config['role_reviewer']['token'])
assert pr_id.limit_id == a
# reset state
- pr_id.write({'limit_id': c.id})
+ pr_id.limit_id = False
# FIXME: remove / merge into mergebot tests
@pytest.mark.parametrize('indent', ['', '\N{SPACE}', '\N{SPACE}'*4, '\N{TAB}'])
@@ -1081,11 +1078,8 @@ class TestRecognizeCommands:
[a] = env['runbot_merge.branch'].search([
('name', '=', 'a')
])
- [c] = env['runbot_merge.branch'].search([
- ('name', '=', 'c')
- ])
- assert pr_id.limit_id == c
+ assert not pr_id.limit_id
with repo:
pr.post_comment(f'{indent}@hansen up to a', config['role_reviewer']['token'])
assert pr_id.limit_id == a
diff --git a/runbot_merge/data/runbot_merge.pull_requests.feedback.template.csv b/runbot_merge/data/runbot_merge.pull_requests.feedback.template.csv
index 4dd5678f..d9e64d2f 100644
--- a/runbot_merge/data/runbot_merge.pull_requests.feedback.template.csv
+++ b/runbot_merge/data/runbot_merge.pull_requests.feedback.template.csv
@@ -167,7 +167,7 @@ pr: the new forward port
containing: label changing depending whether there are ancestors to merge
ancestors: markdown formatted list of parent PRs which can be approved as part of the chain
footer: a footer"
-runbot_merge.forwardport.intermediate,"This PR targets {pr.target.name} and is part of the forward-port chain. Further PRs will be created up to {pr.limit_id.name}.
+runbot_merge.forwardport.intermediate,"This PR targets {pr.target.name} and is part of the forward-port chain. Further PRs will be created up to {pr.limit_pretty}.
{footer}","Comment when a forward port was succcessfully created but is not the last of the line.
pr: the new forward port
diff --git a/runbot_merge/models/batch.py b/runbot_merge/models/batch.py
index 8f819349..30f69e9f 100644
--- a/runbot_merge/models/batch.py
+++ b/runbot_merge/models/batch.py
@@ -1,10 +1,25 @@
from __future__ import annotations
+import ast
+import base64
+import contextlib
+import logging
+import os
+import re
+import typing
+from collections.abc import Iterator
+
+import requests
+
from odoo import models, fields, api
-from odoo.tools import create_index
+from .pull_requests import PullRequests, Branch
from .utils import enum
+_logger = logging.getLogger(__name__)
+FOOTER = '\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n'
+
+
class Batch(models.Model):
""" A batch is a "horizontal" grouping of *codependent* PRs: PRs with
the same label & target but for different repositories. These are
@@ -15,8 +30,9 @@ class Batch(models.Model):
_name = 'runbot_merge.batch'
_description = "batch of pull request"
_inherit = ['mail.thread']
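+ # `_parent_store` maintains the `parent_path` field (see below), so
+ # whole ancestry chains can be fetched with a single prefix search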
+ _parent_store = True
- target = fields.Many2one('runbot_merge.branch', required=True, index=True)
+ target = fields.Many2one('runbot_merge.branch', store=True, compute='_compute_target')
staging_ids = fields.Many2many('runbot_merge.stagings')
split_id = fields.Many2one('runbot_merge.split', index=True)
@@ -44,12 +60,45 @@ class Batch(models.Model):
('default', "Default"),
('priority', "Priority"),
('alone', "Alone"),
- ], default='default', index=True, group_operator=None, required=True,
+ ], default='default', group_operator=None, required=True,
column_type=enum(_name, 'priority'),
)
blocked = fields.Char(store=True, compute="_compute_stageable")
+ # unlike on PRs, this does not get detached... ? (because batches can be
+ # partially detached so that's a PR-level concern)
+ parent_path = fields.Char(index=True)
+ parent_id = fields.Many2one("runbot_merge.batch")
+ genealogy_ids = fields.Many2many(
+ "runbot_merge.batch",
+ compute="_compute_genealogy",
+ context={"active_test": False},
+ )
+
+ @property
+ def source(self):
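+ # `parent_path` is the slash-separated ids of the ancestry with a
+ # trailing slash, e.g. "12/34/56/" (ids illustrative), so the first
+ # segment is the id of the original ("source") batch of the chain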
+ return self.browse(map(int, self.parent_path.split('/', 1)[:1]))
+
+ def descendants(self, include_self: bool = False) -> Iterator[Batch]:
+ # in DB both will prefix-match on the literal prefix then apply a
+ # trivial filter (even though the filter is technically unnecessary for
+ # the first form), doing it like this means we don't have to `- self`
+ # in the `not include_self` case
+ if include_self:
+ pattern = self.parent_path + '%'
+ else:
+ pattern = self.parent_path + '_%'
+
+ return self.search([("parent_path", '=like', pattern)])
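+ # e.g. with parent_path "12/", "12/%" prefix-matches the subtree
+ # including this batch, while "12/_%" requires at least one extra
+ # path segment and thus matches strict descendants only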
+
+ # also depends on all the descendants of the source or sth
+ @api.depends('parent_path')
+ def _compute_genealogy(self):
+ for batch in self:
+ sid = batch.parent_path.split('/', 1)[0]
+ batch.genealogy_ids = self.search([("parent_path", "=like", f"{sid}/%")], order="parent_path")
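+ # ordering on parent_path yields the chain root-first, so the
+ # current tip of the sequence is genealogy_ids[-1:] (as used in
+ # `write` below)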
+
def _auto_init(self):
for field in self._fields.values():
if not isinstance(field, fields.Selection) or field.column_type[0] == 'varchar':
@@ -65,17 +114,36 @@ class Batch(models.Model):
super()._auto_init()
- create_index(
- self.env.cr,
- "runbot_merge_batch_unblocked_idx",
- "runbot_merge_batch",
- ["(blocked is null), priority"],
- )
+ self.env.cr.execute("""
+ CREATE INDEX IF NOT EXISTS runbot_merge_batch_ready_idx
+ ON runbot_merge_batch (target, priority)
+ WHERE blocked IS NULL;
+
+ CREATE INDEX IF NOT EXISTS runbot_merge_batch_parent_id_idx
+ ON runbot_merge_batch (parent_id)
+ WHERE parent_id IS NOT NULL;
+ """)
+
+ @api.depends("prs.target")
+ def _compute_target(self):
+ for batch in self:
+ if len(batch.prs) == 1:
+ batch.target = batch.prs.target
+ else:
+ targets = set(batch.prs.mapped('target'))
+ if len(targets) == 1:
+ batch.target = targets.pop()
+ else:
+ batch.target = False
+
@api.depends(
"merge_date",
"prs.error", "prs.draft", "prs.squash", "prs.merge_method",
"skipchecks", "prs.status", "prs.reviewed_by",
+ "prs.target"
)
def _compute_stageable(self):
for batch in self:
@@ -84,9 +152,7 @@ class Batch(models.Model):
elif blocking := batch.prs.filtered(
lambda p: p.error or p.draft or not (p.squash or p.merge_method)
):
- batch.blocked = "Pull request(s) %s blocked." % (
- p.display_name for p in blocking
- )
+ batch.blocked = "Pull request(s) %s blocked." % ', '.join(blocking.mapped('display_name'))
elif not batch.skipchecks and (unready := batch.prs.filtered(
lambda p: not (p.reviewed_by and p.status == "success")
)):
@@ -98,6 +164,8 @@ class Batch(models.Model):
unvalidated and f"{unvalidated} are waiting for CI",
failed and f"{failed} have failed CI",
]))
+ elif len(targets := batch.prs.mapped('target')) > 1:
+ batch.blocked = f"Multiple target branches: {', '.join(targets.mapped('name'))!r}"
else:
if batch.blocked and batch.cancel_staging:
batch.target.active_staging_id.cancel(
@@ -107,3 +175,333 @@ class Batch(models.Model):
', '.join(batch.prs.mapped('display_name')),
)
batch.blocked = False
+
+
+ def _port_forward(self):
+ if not self:
+ return
+
+ proj = self.target.project_id
+ if not proj.fp_github_token:
+ _logger.warning(
+ "Can not forward-port %s (%s): no token on project %s",
+ self,
+ ', '.join(self.prs.mapped('display_name')),
+ proj.name
+ )
+ return
+
+ notarget = [r.name for r in self.prs.repository if not r.fp_remote_target]
+ if notarget:
+ _logger.error(
+ "Can not forward-port %s (%s): repos %s don't have a forward port remote configured",
+ self,
+ ', '.join(self.prs.mapped('display_name')),
+ ', '.join(notarget),
+ )
+ return
+
+ all_sources = [(p.source_id or p) for p in self.prs]
+ all_targets = [p._find_next_target() for p in self.prs]
+
+ if all(t is None for t in all_targets):
+ # TODO: maybe add a feedback message?
+ _logger.info(
+ "Will not forward port %s (%s): no next target",
+ self,
+ ', '.join(self.prs.mapped('display_name'))
+ )
+ return
+
+ for p, t in zip(self.prs, all_targets):
+ if t is None:
+ _logger.info("Skip forward porting %s (of %s): no next target", p.display_name, self)
+
+ # all the PRs *with a next target* should have the same, we can have PRs
+ # stopping forward port earlier but skipping... probably not
+ targets = set(filter(None, all_targets))
+ if len(targets) != 1:
+ m = dict(zip(all_targets, self.prs))
+ m.pop(None, None)
+ mm = dict(zip(self.prs, all_targets))
+ for pr in self.prs:
+ t = mm[pr]
+ # if a PR is skipped, don't flag it for discrepancy
+ if not t:
+ continue
+
+ other, linked = next((target, p) for target, p in m.items() if target != t)
+ self.env.ref('runbot_merge.forwardport.failure.discrepancy')._send(
+ repository=pr.repository,
+ pull_request=pr.number,
+ token_field='fp_github_token',
+ format_args={'pr': pr, 'linked': linked, 'next': t.name, 'other': other.name},
+ )
+ _logger.warning(
+ "Cancelling forward-port of %s (%s): found different next branches (%s)",
+ self,
+ ', '.join(self.prs.mapped('display_name')),
+ ', '.join(t.name for t in targets),
+ )
+ return
+
+ target = targets.pop()
+ # this is run by the cron, no need to check if otherwise scheduled:
+ # either the scheduled job is this one, or it's an other scheduling
+ # which will run after this one and will see the port already exists
+ if self.search_count([('parent_id', '=', self.id), ('target', '=', target.id)]):
+ _logger.warning(
+ "Will not forward-port %s (%s): already ported",
+ self,
+ ', '.join(self.prs.mapped('display_name'))
+ )
+ return
+
+ # the base PR is the PR with the "oldest" target
+ base = max(all_sources, key=lambda p: (p.target.sequence, p.target.name))
+ # take only the branch bit
+ new_branch = '%s-%s-%s-fw' % (
+ target.name,
+ base.refname,
+ # avoid collisions between fp branches (labels can be reused
+ # or conflict especially as we're chopping off the owner)
+ base64.urlsafe_b64encode(os.urandom(3)).decode()
+ )
+ conflicts = {}
+ with contextlib.ExitStack() as s:
+ for pr in self.prs:
+ conflicts[pr], working_copy = pr._create_fp_branch(
+ target, new_branch, s)
+
+ working_copy.push('target', new_branch)
+
+ gh = requests.Session()
+ gh.headers['Authorization'] = 'token %s' % proj.fp_github_token
+ has_conflicts = any(conflicts.values())
+ # could create a batch here but then we'd have to update `_from_gh` to
+ # take a batch and then `create` to not automatically resolve batches,
+ # easier to not do that.
+ new_batch = self.env['runbot_merge.pull_requests'].browse(())
+ self.env.cr.execute('LOCK runbot_merge_pull_requests IN SHARE MODE')
+ for pr in self.prs:
+ owner, _ = pr.repository.fp_remote_target.split('/', 1)
+ source = pr.source_id or pr
+ root = pr.root_id
+
+ message = source.message + '\n\n' + '\n'.join(
+ "Forward-Port-Of: %s" % p.display_name
+ for p in root | source
+ )
+
+ title, body = re.match(r'(?P<title>[^\n]+)\n*(?P<body>.*)', message, flags=re.DOTALL).groups()
+ r = gh.post(f'https://api.github.com/repos/{pr.repository.name}/pulls', json={
+ 'base': target.name,
+ 'head': f'{owner}:{new_branch}',
+ 'title': '[FW]' + (' ' if title[0] != '[' else '') + title,
+ 'body': body
+ })
+ if not r.ok:
+ _logger.warning("Failed to create forward-port PR for %s, deleting branches", pr.display_name)
+ # delete all the branches, this should automatically close the
+ # PRs if we've created any. Using the API here is probably
+ # simpler than going through the working copies
+ for repo in self.prs.mapped('repository'):
+ d = gh.delete(f'https://api.github.com/repos/{repo.fp_remote_target}/git/refs/heads/{new_branch}')
+ if d.ok:
+ _logger.info("Deleting %s:%s=success", repo.fp_remote_target, new_branch)
+ else:
+ _logger.warning("Deleting %s:%s=%s", repo.fp_remote_target, new_branch, d.text)
+ raise RuntimeError("Forwardport failure: %s (%s)" % (pr.display_name, r.text))
+
+ new_pr = pr._from_gh(r.json())
+ _logger.info("Created forward-port PR %s", new_pr)
+ new_batch |= new_pr
+
+ # allows PR author to close or skipci
+ new_pr.write({
+ 'merge_method': pr.merge_method,
+ 'source_id': source.id,
+ # only link to previous PR of sequence if cherrypick passed
+ 'parent_id': pr.id if not has_conflicts else False,
+ 'detach_reason': "conflicts: {}".format(
+ f'\n{conflicts[pr]}\n{conflicts[pr]}'.strip()
+ ) if has_conflicts else None,
+ })
+ if has_conflicts and pr.parent_id and pr.state not in ('merged', 'closed'):
+ self.env.ref('runbot_merge.forwardport.failure.conflict')._send(
+ repository=pr.repository,
+ pull_request=pr.number,
+ token_field='fp_github_token',
+ format_args={'source': source, 'pr': pr, 'new': new_pr, 'footer': FOOTER},
+ )
+
+ for pr, new_pr in zip(self.prs, new_batch):
+ (h, out, err, hh) = conflicts.get(pr) or (None, None, None, None)
+
+ if h:
+ sout = serr = ''
+ if out.strip():
+ sout = f"\nstdout:\n```\n{out}\n```\n"
+ if err.strip():
+ serr = f"\nstderr:\n```\n{err}\n```\n"
+
+ lines = ''
+ if len(hh) > 1:
+ lines = '\n' + ''.join(
+ '* %s%s\n' % (sha, ' <- on this commit' if sha == h else '')
+ for sha in hh
+ )
+ template = 'runbot_merge.forwardport.failure'
+ format_args = {
+ 'pr': new_pr,
+ 'commits': lines,
+ 'stdout': sout,
+ 'stderr': serr,
+ 'footer': FOOTER,
+ }
+ elif has_conflicts:
+ template = 'runbot_merge.forwardport.linked'
+ format_args = {
+ 'pr': new_pr,
+ 'siblings': ', '.join(p.display_name for p in (new_batch - new_pr)),
+ 'footer': FOOTER,
+ }
+ elif not new_pr._find_next_target():
+ ancestors = "".join(
+ f"* {p.display_name}\n"
+ for p in pr._iter_ancestors()
+ if p.parent_id
+ )
+ template = 'runbot_merge.forwardport.final'
+ format_args = {
+ 'pr': new_pr,
+ 'containing': ' containing:' if ancestors else '.',
+ 'ancestors': ancestors,
+ 'footer': FOOTER,
+ }
+ else:
+ template = 'runbot_merge.forwardport.intermediate'
+ format_args = {
+ 'pr': new_pr,
+ 'footer': FOOTER,
+ }
+ self.env.ref(template)._send(
+ repository=new_pr.repository,
+ pull_request=new_pr.number,
+ token_field='fp_github_token',
+ format_args=format_args,
+ )
+
+ labels = ['forwardport']
+ if has_conflicts:
+ labels.append('conflict')
+ self.env['runbot_merge.pull_requests.tagging'].create({
+ 'repository': new_pr.repository.id,
+ 'pull_request': new_pr.number,
+ 'tags_add': labels,
+ })
+
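+ # the forward-port PRs were created under a fresh batch (resolved in
+ # PullRequests.create), hook it into the ancestry so parent_path
+ # lookups (genealogy, descendants) see it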
+ new_batch = new_batch.batch_id
+ new_batch.parent_id = self
+ # try to schedule followup
+ new_batch._schedule_fp_followup()
+ return new_batch
+
+
+ def _schedule_fp_followup(self):
+ _logger = logging.getLogger(__name__).getChild('forwardport.next')
+ # if the PR has a parent and is CI-validated, enqueue the next PR
+ scheduled = self.browse(())
+ for batch in self:
+ prs = ', '.join(batch.prs.mapped('display_name'))
+ _logger.info('Checking if forward-port %s (%s)', batch, prs)
+ # in case of conflict or update, individual PRs will "lose" their
+ # parent, which should prevent forward porting
+ if not (batch.parent_id and all(p.parent_id for p in batch.prs)):
+ _logger.info('-> no parent %s (%s)', batch, prs)
+ continue
+ if not self.env.context.get('force_fw') and batch.source.fw_policy != 'skipci' \
+ and (invalid := batch.prs.filtered(lambda p: p.state not in ['validated', 'ready'])):
+ _logger.info(
+ '-> wrong state %s (%s)',
+ batch,
+ ', '.join(f"{p.display_name}: {p.state}" for p in invalid),
+ )
+ continue
+
+ # check if we've already forward-ported this branch
+ next_target = batch._find_next_targets()
+ if not next_target:
+ _logger.info("-> forward port done (no next target)")
+ continue
+ if len(next_target) > 1:
+ _logger.error(
+ "-> cancelling forward-port of %s (%s): inconsistent next target branch (%s)",
+ batch,
+ prs,
+ ', '.join(next_target.mapped('name')),
+ )
+ continue
+
+ if n := self.search([
+ ('target', '=', next_target.id),
+ ('parent_id', '=', batch.id),
+ ], limit=1):
+ _logger.info('-> already forward-ported (%s)', n)
+ continue
+
+ _logger.info("check pending port for %s (%s)", batch, prs)
+ if self.env['forwardport.batches'].search_count([('batch_id', '=', batch.id)]):
+ _logger.warning('-> already recorded')
+ continue
+
+ _logger.info('-> ok')
+ self.env['forwardport.batches'].create({
+ 'batch_id': batch.id,
+ 'source': 'fp',
+ })
+ scheduled |= batch
+ return scheduled
+
+ def _find_next_target(self):
+ """Retrieves the next target from every PR, and returns it if it's the
+ same for all the PRs which have one (PRs without a next target are
+ ignored; this is considered acceptable).
+
+ If the next targets are inconsistent, returns no next target.
+ """
+ next_target = self._find_next_targets()
+ if len(next_target) == 1:
+ return next_target
+ else:
+ return self.env['runbot_merge.branch'].browse(())
+
+ def _find_next_targets(self):
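+ # `mapped` over a recordset-returning callable unions the results,
+ # yielding the deduplicated set of next targets across the batch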
+ return self.prs.mapped(lambda p: p._find_next_target() or self.env['runbot_merge.branch'])
+
+ def write(self, vals):
+ if vals.get('merge_date'):
+ # TODO: remove condition when everything is merged
+ remover = self.env.get('forwardport.branch_remover')
+ if remover is not None:
+ remover.create([
+ {'pr_id': p.id}
+ for b in self
+ if not b.merge_date
+ for p in b.prs
+ ])
+
+ if vals.get('fw_policy') == 'skipci':
+ nonskip = self.filtered(lambda b: b.fw_policy != 'skipci')
+ else:
+ nonskip = self.browse(())
+ super().write(vals)
+
+ # if we change the policy to skip CI, schedule followups on merged
+ # batches which were not previously marked as skipping CI
+ if nonskip:
+ toggled = nonskip.filtered(lambda b: b.merge_date)
+ tips = toggled.mapped(lambda b: b.genealogy_ids[-1:])
+ for tip in tips:
+ tip._schedule_fp_followup()
+
+ return True
diff --git a/runbot_merge/models/project.py b/runbot_merge/models/project.py
index c0420f9a..b3b55a8c 100644
--- a/runbot_merge/models/project.py
+++ b/runbot_merge/models/project.py
@@ -1,12 +1,14 @@
import logging
import re
-from typing import List, Tuple
+from typing import List
import requests
import sentry_sdk
from odoo import models, fields, api
from odoo.exceptions import UserError
+from odoo.osv import expression
+from odoo.tools import reverse_order
_logger = logging.getLogger(__name__)
class Project(models.Model):
@@ -217,3 +219,10 @@ class Project(models.Model):
]
})
return w.action_open()
+
+ def _forward_port_ordered(self, domain=()):
+ Branches = self.env['runbot_merge.branch']
+ return Branches.search(expression.AND([
+ [('project_id', '=', self.id)],
+ domain or [],
+ ]), order=reverse_order(Branches._order))
diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py
index f2adaa9d..ca3d90e5 100644
--- a/runbot_merge/models/pull_requests.py
+++ b/runbot_merge/models/pull_requests.py
@@ -10,14 +10,16 @@ import logging
import re
import time
from functools import reduce
-from typing import Optional, Union, List, Iterator
+from operator import itemgetter
+from typing import Optional, Union, List, Iterator, Tuple
+import psycopg2.errors
import sentry_sdk
import werkzeug
from odoo import api, fields, models, tools, Command
from odoo.osv import expression
-from odoo.tools import html_escape
+from odoo.tools import html_escape, Reverse
from . import commands
from .utils import enum
@@ -416,10 +418,6 @@ class PullRequests(models.Model):
ping = fields.Char(compute='_compute_ping', recursive=True)
- fw_policy = fields.Selection([
- ('default', "Default"),
- ('skipci', "Skip CI"),
- ], required=True, default="default", string="Forward Port Policy")
source_id = fields.Many2one('runbot_merge.pull_requests', index=True, help="the original source of this FP even if parents were detached along the way")
parent_id = fields.Many2one(
'runbot_merge.pull_requests', index=True,
@@ -515,6 +513,16 @@ class PullRequests(models.Model):
def _linked_prs(self):
return self.batch_id.prs - self
+ @property
+ def limit_pretty(self):
+ if self.limit_id:
+ return self.limit_id.name
+
+ branches = self.repository.project_id.branch_ids
+ if ((bf := self.repository.branch_filter) or '[]') != '[]':
+ branches = branches.filtered_domain(ast.literal_eval(bf))
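+ # branches are ordered with the last link of the forward-port chain
+ # first (lowest sequence, e.g. master), which is the implicit limit
+ # when none was requested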
+ return branches[:1].name
+
@api.depends(
'batch_id.prs.draft',
'batch_id.prs.squash',
@@ -797,10 +805,20 @@ class PullRequests(models.Model):
else:
msg = "you can't configure forward-port CI."
case commands.Limit(branch) if is_author:
- ping, msg = self._maybe_update_limit(branch or self.target.name)
- if not ping:
- feedback(message=msg)
- msg = None
+ limit = branch or self.target.name
+ for p in self.batch_id.prs:
+ ping, m = p._maybe_update_limit(limit)
+
+ if ping and p == self:
+ msg = m
+ else:
+ if ping:
+ m = f"@{login} {m}"
+ self.env['runbot_merge.pull_requests.feedback'].create({
+ 'repository': p.repository.id,
+ 'pull_request': p.number,
+ 'message': m,
+ })
case commands.Limit():
msg = "you can't set a forward-port limit."
# NO!
@@ -824,6 +842,121 @@ class PullRequests(models.Model):
feedback(message=f"@{login}{rejections}{footer}")
return 'rejected'
+ def _maybe_update_limit(self, limit: str) -> Tuple[bool, str]:
+ limit_id = self.env['runbot_merge.branch'].with_context(active_test=False).search([
+ ('project_id', '=', self.repository.project_id.id),
+ ('name', '=', limit),
+ ])
+ if not limit_id:
+ return True, f"there is no branch {limit!r}, it can't be used as a forward port target."
+
+ if limit_id != self.target and not limit_id.active:
+ return True, f"branch {limit_id.name!r} is disabled, it can't be used as a forward port target."
+
+ # not forward ported yet, just acknowledge the request
+ if not self.source_id and self.state != 'merged':
+ self.limit_id = limit_id
+ if branch_key(limit_id) <= branch_key(self.target):
+ return False, "Forward-port disabled."
+ else:
+ return False, f"Forward-porting to {limit_id.name!r}."
+
+ # if the PR has been forwardported
+ prs = (self | self.forwardport_ids | self.source_id | self.source_id.forwardport_ids)
+ tip = max(prs, key=pr_key)
+ # if the fp tip was closed it's fine
+ if tip.state == 'closed':
+ return True, f"{tip.display_name} is closed, no forward porting is going on"
+
+ prs.limit_id = limit_id
+
+ real_limit = max(limit_id, tip.target, key=branch_key)
+
+ addendum = ''
+ # check if tip was queued for forward porting, try to cancel if we're
+ # supposed to stop here
+ if real_limit == tip.target and (task := self.env['forwardport.batches'].search([('batch_id', '=', tip.batch_id.id)])):
+ try:
+ with self.env.cr.savepoint():
+ self.env.cr.execute(
+ "SELECT FROM forwardport_batches "
+ "WHERE id = %s FOR UPDATE NOWAIT",
+ [task.id])
+ except psycopg2.errors.LockNotAvailable:
+ # row locked = port occurring and probably going to succeed,
+ # so next(real_limit) likely a done deal already
+ return True, (
+ f"Forward port of {tip.display_name} likely already "
+ f"ongoing, unable to cancel, close next forward port "
+ f"when it completes.")
+ else:
+ self.env.cr.execute("DELETE FROM forwardport_batches WHERE id = %s", [task.id])
+
+ if real_limit != tip.target:
+ # forward porting was previously stopped at tip, and we want it to
+ # resume
+ if tip.state == 'merged':
+ self.env['forwardport.batches'].create({
+ 'batch_id': tip.batch_id.id,
+ 'source': 'fp' if tip.parent_id else 'merge',
+ })
+ resumed = tip
+ else:
+ resumed = tip.batch_id._schedule_fp_followup()
+ if resumed:
+ addendum += f', resuming forward-port stopped at {tip.display_name}'
+
+ if real_limit != limit_id:
+ addendum += f' (instead of the requested {limit_id.name!r} because {tip.display_name} already exists)'
+
+ # get a "stable" root rather than self's to avoid divertences between
+ # PRs across a root divide (where one post-root would point to the root,
+ # and one pre-root would point to the source, or a previous root)
+ root = tip.root_id
+ # reference the root being forward ported unless we are the root
+ root_ref = '' if root == self else f' {root.display_name}'
+ msg = f"Forward-porting{root_ref} to {real_limit.name!r}{addendum}."
+ # send a message to the source & root except for self, if they exist
+ root_msg = f'Forward-porting to {real_limit.name!r} (from {self.display_name}).'
+ self.env['runbot_merge.pull_requests.feedback'].create([
+ {
+ 'repository': p.repository.id,
+ 'pull_request': p.number,
+ 'message': root_msg,
+ 'token_field': 'fp_github_token',
+ }
+ # send messages to source and root unless root is self (as it
+ # already gets the normal message)
+ for p in (self.source_id | root) - self
+ ])
+
+ return False, msg
+
+
+ def _find_next_target(self) -> Optional[Branch]:
+ """ Finds the branch between target and limit_id which follows
+ reference
+ """
+ root = (self.source_id or self)
+ if self.target == root.limit_id:
+ return
+
+ branches = root.target.project_id.with_context(active_test=False)._forward_port_ordered()
+ if (branch_filter := self.repository.branch_filter) and branch_filter != '[]':
+ branches = branches.filtered_domain(ast.literal_eval(branch_filter))
+
+ branches = list(branches)
+ from_ = branches.index(self.target) + 1
+ to_ = branches.index(root.limit_id) + 1 if root.limit_id else None
+
+ # return the first active branch in the set
+ return next((
+ branch
+ for branch in branches[from_:to_]
+ if branch.active
+ ), None)
+
+
def _approve(self, author, login):
oldstate = self.state
newstate = RPLUS.get(self.state)
@@ -1023,20 +1156,33 @@ class PullRequests(models.Model):
return self.state
def _get_batch(self, *, target, label):
- Batches = self.env['runbot_merge.batch']
- if re.search(r':patch-\d+$', label):
- batch = Batches
- else:
- batch = Batches.search([
+ batch = self.env['runbot_merge.batch']
+ if not re.search(r':patch-\d+$', label):
+ batch = batch.search([
('merge_date', '=', False),
- ('target', '=', target),
+ ('prs.target', '=', target),
('prs.label', '=', label),
])
- return batch or Batches.create({'target': target})
+ return batch or batch.create({})
@api.model
def create(self, vals):
- vals['batch_id'] = self._get_batch(target=vals['target'], label=vals['label']).id
+ batch = self._get_batch(target=vals['target'], label=vals['label'])
+ vals['batch_id'] = batch.id
+ if 'limit_id' not in vals:
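+ # a PR joining an existing batch inherits the limit its batch-mates
+ # agree on; if they disagree there is no sensible default, so warn
+ # and leave the limit unset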
+ limits = {p.limit_id for p in batch.prs}
+ if len(limits) == 1:
+ vals['limit_id'] = limits.pop().id
+ elif limits:
+ repo = self.env['runbot_merge.repository'].browse(vals['repository'])
+ _logger.warning(
+ "Unable to set limit on %s#%s: found multiple limits in batch (%s)",
+ repo.name, vals['number'],
+ ', '.join(
+ f'{p.display_name} => {p.limit_id.name}'
+ for p in batch.prs
+ )
+ )
pr = super().create(vals)
c = self.env['runbot_merge.commit'].search([('sha', '=', pr.head)])
@@ -1116,13 +1262,6 @@ class PullRequests(models.Model):
target=vals.get('target') or self.target.id,
label=vals.get('label') or self.label,
)
- case None if (new_target := vals.get('target')) and new_target != self.target.id:
- vals['batch_id'] = self._get_batch(
- target=new_target,
- label=vals.get('label') or self.label,
- )
- if len(self.batch_id.prs) == 1:
- self.env.cr.precommit.add(self.batch_id.unlink)
w = super().write(vals)
newhead = vals.get('head')
@@ -1259,6 +1398,18 @@ class PullRequests(models.Model):
old_batch.unlink()
return True
+# ordering is a bit unintuitive because the lowest sequence (and name)
+# is the last link of the fp chain, reasoning is a bit more natural the
+# other way around (highest object is the last), especially with Python
+# not really having lazy sorts in the stdlib
+def branch_key(b: Branch, /, _key=itemgetter('sequence', 'name')):
+ return Reverse(_key(b))
+
+
+def pr_key(p: PullRequests, /):
+ return branch_key(p.target)
+
+
# state changes on reviews
RPLUS = {
'opened': 'approved',
diff --git a/runbot_merge/views/mergebot.xml b/runbot_merge/views/mergebot.xml
index f4a65b61..46e87ab8 100644
--- a/runbot_merge/views/mergebot.xml
+++ b/runbot_merge/views/mergebot.xml
@@ -158,7 +158,7 @@
-
+