[MERGE] runbot_merge, forwardport: v2 model
Massive rewrite of the commands set, state management, and batch management:

- the commands set was largely rewritten, removing the interactions with
  the fwbot entirely (only one bot receives commands anymore); commands
  were made more flexible & orthogonal, and parsing was formalised
- state management was rewritten to better leverage computes
- batches are now persistent rather than being ad-hoc staging-only
  concepts: a batch is still a horizontal set of PRs, but it now "keeps"
  through multiple stagings, and batches can be linked, which allows a v2
  dashboard showing a complete overview of the batches through
  forward-ports, including showing the overview on the PR itself
commit 5703513c46
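Aside (editor's illustration, not part of the commit): the core of the v2 model is that a batch now persists and links to the batch it was forward-ported from. A minimal, self-contained sketch of that linkage, using hypothetical plain-Python stand-ins for the Odoo models:

```python
# Hypothetical stand-in for runbot_merge.batch: a persistent, linked batch.
from dataclasses import dataclass, field

_registry: list = []

@dataclass
class Batch:
    target: str                       # branch this horizontal set of PRs targets
    prs: list = field(default_factory=list)
    parent: "Batch | None" = None     # batch this one was forward-ported from

    def __post_init__(self):
        _registry.append(self)

    def descendants(self):
        """Walk the forward-port chain away from the source, depth-first."""
        for child in (b for b in _registry if b.parent is self):
            yield child
            yield from child.descendants()

# a batch merged on 15.0 and forward-ported twice stays one linked sequence
b0 = Batch("15.0", prs=["odoo/odoo#1", "odoo/enterprise#2"])
b1 = Batch("16.0", prs=["odoo/odoo#3", "odoo/enterprise#4"], parent=b0)
b2 = Batch("17.0", prs=["odoo/odoo#5", "odoo/enterprise#6"], parent=b1)
assert [b.target for b in b0.descendants()] == ["16.0", "17.0"]
```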
@@ -1175,6 +1175,9 @@ class Model:
    def __len__(self):
        return len(self._ids)

    def __hash__(self):
        return hash((self._model, frozenset(self._ids)))

    def __eq__(self, other):
        if not isinstance(other, Model):
            return NotImplemented
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
{
    'name': 'forward port bot',
    'version': '1.3',
    'version': '1.4',
    'summary': "A port which forward ports successful PRs.",
    'depends': ['runbot_merge'],
    'data': [
@@ -136,31 +136,6 @@
                        t-attf-title="@{{pr.reviewed_by.github_login}}"/>
                </dd>
            </t>
            <t t-if="pr.source_id">
                <dt>forward-port of</dt>
                <dd>
                    <a t-att-href="pr.source_id.url">
                        <span t-field="pr.source_id.display_name"/>
                    </a>
                    <span t-if="not pr.parent_id"
                          class="badge badge-danger user-select-none"
                          title="A detached PR behaves like a non-forward-port, it has to be approved via the mergebot, this is usually caused by the forward-port having been in conflict or updated.">
                        DETACHED (<span t-out="pr.detach_reason" style="white-space: pre-wrap;"/>)
                    </span>
                </dd>
            </t>
            <t t-if="pr.forwardport_ids">
                <dt>forward-ports</dt>
                <dd><ul>
                    <t t-foreach="pr.forwardport_ids" t-as="p">
                        <t t-set="bgsignal"><t t-call="forwardport.pr_background"/></t>
                        <li t-att-class="bgsignal">
                            <a t-att-href="p.url"><span t-field="p.display_name"/></a>
                            targeting <span t-field="p.target.name"/>
                        </li>
                    </t>
                </ul></dd>
            </t>
        </xpath>
    </template>
@@ -176,7 +151,6 @@
                </group>
                <group>
                    <field string="Bot Name" name="fp_github_name" readonly="0"/>
                    <field string="Bot Email" name="fp_github_email" readonly="0"/>
                </group>
            </group>
        </xpath>
@@ -200,37 +174,4 @@
        </field>
    </record>

    <record model="ir.ui.view" id="pr">
        <field name="name">Show forwardport PR fields</field>
        <field name="inherit_id" ref="runbot_merge.runbot_merge_form_prs"/>
        <field name="model">runbot_merge.pull_requests</field>
        <field name="arch" type="xml">
            <xpath expr="//field[@name='state']" position="after">
                <field name="merge_date" attrs="{'invisible': [('state', '!=', 'merged')]}"/>
            </xpath>
            <xpath expr="//sheet/group[2]" position="after">
                <separator string="Forward Port" attrs="{'invisible': [('source_id', '=', False)]}"/>
                <group attrs="{'invisible': [('source_id', '!=', False)]}">
                    <group>
                        <field string="Policy" name="fw_policy"/>
                    </group>
                </group>
                <group attrs="{'invisible': [('source_id', '=', False)]}">
                    <group>
                        <field string="Original PR" name="source_id"/>
                    </group>
                    <group attrs="{'invisible': [('parent_id', '=', False)]}">
                        <field name="parent_id"/>
                    </group>
                    <group colspan="4" attrs="{'invisible': [('parent_id', '!=', False)]}">
                        <field string="Detached because" name="detach_reason" readonly="1"/>
                    </group>
                    <group>
                        <field string="Forward ported up to" name="limit_id"/>
                    </group>
                </group>
            </xpath>
        </field>
    </record>

</odoo>
forwardport/migrations/15.0.1.4/pre-migration.py (new file, 7 lines)
@@ -0,0 +1,7 @@
def migrate(cr, version):
    cr.execute("ALTER TABLE runbot_merge_project DROP COLUMN IF EXISTS fp_github_email")
    cr.execute("""
        ALTER TABLE runbot_merge_branch
        DROP COLUMN IF EXISTS fp_sequence,
        DROP COLUMN IF EXISTS fp_target
    """)
@@ -1,9 +1,12 @@
# -*- coding: utf-8 -*-
import contextlib
import logging
import re
import uuid
from contextlib import ExitStack
from datetime import datetime, timedelta

import requests
import sentry_sdk
from dateutil import relativedelta
@@ -13,6 +16,7 @@ from odoo.addons.runbot_merge.github import GH

# how long a merged PR survives
MERGE_AGE = relativedelta.relativedelta(weeks=2)
FOOTER = '\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n'

_logger = logging.getLogger(__name__)
@@ -66,9 +70,11 @@ class ForwardPortTasks(models.Model, Queue):
    source = fields.Selection([
        ('merge', 'Merge'),
        ('fp', 'Forward Port Followup'),
        ('insert', 'New branch port')
        ('insert', 'New branch port'),
        ('complete', 'Complete ported batches'),
    ], required=True)
    retry_after = fields.Datetime(required=True, default='1900-01-01 01:01:01')
    pr_id = fields.Many2one('runbot_merge.pull_requests')

    def _search_domain(self):
        return super()._search_domain() + [
@@ -82,35 +88,155 @@
    def _process_item(self):
        batch = self.batch_id
        sentry_sdk.set_tag('forward-porting', batch.prs.mapped('display_name'))
        newbatch = batch.prs._port_forward()
        if self.source == 'complete':
            self._complete_batches()
            return

        if newbatch:
            _logger.info(
                "Processing %s (from %s): %s (%s) -> %s (%s)",
                self.id, self.source,
                batch, batch.prs,
                newbatch, newbatch.prs,
            )
            # insert new batch in ancestry sequence unless conflict (= no parent)
            if self.source == 'insert':
                for pr in newbatch.prs:
                    if not pr.parent_id:
                        break
                    newchild = pr.search([
                        ('parent_id', '=', pr.parent_id.id),
                        ('id', '!=', pr.id),
                    ])
                    if newchild:
                        newchild.parent_id = pr.id
        else: # reached end of seq (or batch is empty)
        newbatch = batch._port_forward()
        if not newbatch: # reached end of seq (or batch is empty)
            # FIXME: or configuration is fucky so doesn't want to FP (maybe should error and retry?)
            _logger.info(
                "Processing %s (from %s): %s (%s) -> end of the sequence",
                self.id, self.source,
                batch, batch.prs
                "Processed %s from %s (%s) -> end of the sequence",
                batch, self.source, batch.prs.mapped('display_name'),
            )
            batch.active = False
            return

        _logger.info(
            "Processed %s from %s (%s) -> %s (%s)",
            batch, self.source, ', '.join(batch.prs.mapped('display_name')),
            newbatch, ', '.join(newbatch.prs.mapped('display_name')),
        )
        # insert new batch in ancestry sequence
        if self.source == 'insert':
            self.env['runbot_merge.batch'].search([
                ('parent_id', '=', batch.id),
                ('id', '!=', newbatch.id),
            ]).parent_id = newbatch.id
            # insert new PRs in ancestry sequence unless conflict (= no parent)
            for pr in newbatch.prs:
                if not pr.parent_id:
                    break
                newchild = pr.search([
                    ('parent_id', '=', pr.parent_id.id),
                    ('id', '!=', pr.id),
                ])
                if newchild:
                    newchild.parent_id = pr.id

    def _complete_batches(self):
        source = pr = self.pr_id
        if not pr:
            _logger.warning(
                "Unable to complete descendants of %s (%s): no new PR",
                self.batch_id,
                self.batch_id.prs.mapped('display_name'),
            )
            return
        _logger.info(
            "Completing batches for descendants of %s (added %s)",
            self.batch_id.prs.mapped('display_name'),
            self.pr_id.display_name,
        )

        gh = requests.Session()
        repository = pr.repository
        gh.headers['Authorization'] = f'token {repository.project_id.fp_github_token}'
        PullRequests = self.env['runbot_merge.pull_requests']
        self.env.cr.execute('LOCK runbot_merge_pull_requests IN SHARE MODE')

        # TODO: extract complete list of targets from `_find_next_target`
        #       so we can create all the forwardport branches, push them, and
        #       only then create the PR objects
        # TODO: maybe do that after making forward-port WC-less, so all the
        #       branches can be pushed atomically at once
        with contextlib.ExitStack() as s:
            for descendant in self.batch_id.descendants():
                target = pr._find_next_target()
                if target is None:
                    _logger.info("Will not forward-port %s: no next target", pr.display_name)
                    return

                if PullRequests.search_count([
                    ('source_id', '=', source.id),
                    ('target', '=', target.id),
                    ('state', 'not in', ('closed', 'merged')),
                ]):
                    _logger.warning("Will not forward-port %s: already ported", pr.display_name)
                    return

                if target != descendant.target:
                    self.env['runbot_merge.pull_requests.feedback'].create({
                        'repository': repository.id,
                        'pull_request': source.id,
                        'token_field': 'fp_github_token',
                        'message': """\
{pr.ping}unable to port this PR forwards due to inconsistency: goes from \
{pr.target.name} to {next_target.name} but {batch} ({batch_prs}) targets \
{batch.target.name}.
""".format(pr=pr, next_target=target, batch=descendant, batch_prs=', '.join(descendant.mapped('prs.display_name')))
                    })
                    return

                ref = descendant.prs[:1].refname
                # NOTE: ports the new source everywhere instead of porting each
                #       PR to the next step as it does not *stop* on conflict
                conflict, working_copy = source._create_fp_branch(target, ref, s)
                working_copy.push('target', ref)

                remote_target = repository.fp_remote_target
                owner, _ = remote_target.split('/', 1)
                message = source.message + f"\n\nForward-Port-Of: {pr.display_name}"

                title, body = re.match(r'(?P<title>[^\n]+)\n*(?P<body>.*)', message, flags=re.DOTALL).groups()
                r = gh.post(f'https://api.github.com/repos/{pr.repository.name}/pulls', json={
                    'base': target.name,
                    'head': f'{owner}:{ref}',
                    'title': '[FW]' + (' ' if title[0] != '[' else '') + title,
                    'body': body
                })
                if not r.ok:
                    _logger.warning("Failed to create forward-port PR for %s, deleting branches", pr.display_name)
                    # delete all the branches this should automatically close the
                    # PRs if we've created any. Using the API here is probably
                    # simpler than going through the working copies
                    d = gh.delete(f'https://api.github.com/repos/{remote_target}/git/refs/heads/{ref}')
                    if d.ok:
                        _logger.info("Deleting %s:%s=success", remote_target, ref)
                    else:
                        _logger.warning("Deleting %s:%s=%s", remote_target, ref, d.text)
                    raise RuntimeError(f"Forwardport failure: {pr.display_name} ({r.text})")

                new_pr = PullRequests._from_gh(r.json())
                _logger.info("Created forward-port PR %s", new_pr)
                new_pr.write({
                    'batch_id': descendant.id, # should already be set correctly but...
                    'merge_method': pr.merge_method,
                    'source_id': source.id,
                    # only link to previous PR of sequence if cherrypick passed
                    # FIXME: apply parenting of siblings? Apply parenting *to* siblings?
                    'parent_id': pr.id if not conflict else False,
                    'detach_reason': "{1}\n{2}".format(*conflict).strip() if conflict else None,
                })

                if conflict:
                    self.env.ref('runbot_merge.forwardport.failure.conflict')._send(
                        repository=pr.repository,
                        pull_request=pr.number,
                        token_field='fp_github_token',
                        format_args={'source': source, 'pr': pr, 'new': new_pr, 'footer': FOOTER},
                    )
                new_pr._fp_conflict_feedback(pr, {pr: conflict})

                labels = ['forwardport']
                if conflict:
                    labels.append('conflict')
                self.env['runbot_merge.pull_requests.tagging'].create({
                    'repository': new_pr.repository.id,
                    'pull_request': new_pr.number,
                    'tags_add': labels,
                })

                pr = new_pr

class UpdateQueue(models.Model, Queue):
    _name = 'forwardport.updates'
@@ -11,41 +11,31 @@ means PR creation is trickier (as mergebot assumes opened event will always
lead to PR creation but fpbot wants to attach meaning to the PR when setting
it up), ...
"""
import ast
import base64
import contextlib
import datetime
import itertools
import json
import logging
import operator
import os
import re
import subprocess
import sys
import tempfile
import typing
from functools import reduce
from operator import itemgetter
from pathlib import Path

import dateutil.relativedelta
import psycopg2.errors
import requests

from odoo import models, fields, api
from odoo.osv import expression
from odoo.exceptions import UserError
from odoo.tools.misc import topological_sort, groupby, Reverse
from odoo.tools.sql import reverse_order
from odoo.osv import expression
from odoo.tools.misc import topological_sort, groupby
from odoo.tools.appdirs import user_cache_dir
from odoo.addons.base.models.res_partner import Partner
from odoo.addons.runbot_merge import git, utils
from odoo.addons.runbot_merge.models.pull_requests import RPLUS, Branch
from odoo.addons.runbot_merge import git
from odoo.addons.runbot_merge.models.pull_requests import Branch
from odoo.addons.runbot_merge.models.stagings_create import Message


footer = '\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n'

DEFAULT_DELTA = dateutil.relativedelta.relativedelta(days=3)

_logger = logging.getLogger('odoo.addons.forwardport')
@@ -55,55 +45,6 @@ class Project(models.Model):

    id: int
    github_prefix: str
    fp_github_token = fields.Char()
    fp_github_name = fields.Char(store=True, compute="_compute_git_identity")
    fp_github_email = fields.Char(store=True, compute="_compute_git_identity")

    def _find_commands(self, comment):
        if self.env.context.get('without_forward_port'):
            return super()._find_commands(comment)

        return re.findall(
            '^\s*[@|#]?{}:? (.*)$'.format(self.fp_github_name),
            comment, re.MULTILINE | re.IGNORECASE
        ) + super()._find_commands(comment)

    # technically the email could change at any moment...
    @api.depends('fp_github_token')
    def _compute_git_identity(self):
        s = requests.Session()
        for project in self:
            if not project.fp_github_token or (project.fp_github_name and project.fp_github_email):
                continue

            r0 = s.get('https://api.github.com/user', headers={
                'Authorization': 'token %s' % project.fp_github_token
            })
            if not r0.ok:
                _logger.error("Failed to fetch forward bot information for project %s: %s", project.name, r0.text or r0.content)
                continue

            user = r0.json()
            project.fp_github_name = user['name'] or user['login']
            if email := user['email']:
                project.fp_github_email = email
                continue

            if 'user:email' not in set(re.split(r',\s*', r0.headers['x-oauth-scopes'])):
                raise UserError("The forward-port github token needs the user:email scope to fetch the bot's identity.")
            r1 = s.get('https://api.github.com/user/emails', headers={
                'Authorization': 'token %s' % project.fp_github_token
            })
            if not r1.ok:
                _logger.error("Failed to fetch forward bot emails for project %s: %s", project.name, r1.text or r1.content)
                continue
            project.fp_github_email = next((
                entry['email']
                for entry in r1.json()
                if entry['primary']
            ), None)
            if not project.fp_github_email:
                raise UserError("The forward-port bot needs a public or primary email set up.")

    def write(self, vals):
        # check on branches both active and inactive so disabling branches doesn't
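Aside (editor's illustration, not part of the commit): `_find_commands` above extracts one command line per match of the bot-name pattern. A small sketch of that regex on a sample comment, with a hypothetical bot name:

```python
import re

fp_github_name = 'fw-bot'  # hypothetical value of the field computed above
comment = """Looks good to me!

@fw-bot up to 16.0
fw-bot: skipci
"""
# same pattern as _find_commands: optional @/# prefix, optional colon
commands = re.findall(
    '^\\s*[@|#]?{}:? (.*)$'.format(fp_github_name),
    comment, re.MULTILINE | re.IGNORECASE,
)
assert commands == ['up to 16.0', 'skipci']
```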
@@ -122,34 +63,43 @@
        because no CI or CI failed), create followup, as if the branch had been
        originally disabled (and thus skipped over)
        """
        PRs = self.env['runbot_merge.pull_requests']
        Batch = self.env['runbot_merge.batch']
        ported = self.env['runbot_merge.pull_requests']
        for p in self:
            actives = previously_active_branches[p]
            for deactivated in p.branch_ids.filtered(lambda b: not b.active) & actives:
                # if a PR targets a deactivated branch, and that's not its limit,
                # and it doesn't have a child (e.g. CI failed), enqueue a forward
                # port as if the now deactivated branch had been skipped over (which
                # is the normal fw behaviour)
                extant = PRs.search([
                # if a non-merged batch targets a deactivated branch which is
                # not its limit
                extant = Batch.search([
                    ('parent_id', '!=', False),
                    ('target', '=', deactivated.id),
                    ('source_id.limit_id', '!=', deactivated.id),
                    ('state', 'not in', ('closed', 'merged')),
                ])
                for p in extant.with_context(force_fw=True):
                    next_target = p.source_id._find_next_target(p)
                    # should not happen since we already filtered out limits
                    if not next_target:
                        continue

                    # check if it has a descendant in the next branch, if so skip
                    if PRs.search_count([
                        ('source_id', '=', p.source_id.id),
                        ('target', '=', next_target.id)
                    ]):
                        continue

                    # if at least one of the PRs has a different limit
                    ('prs.limit_id', '!=', deactivated.id),
                    ('merge_date', '=', False),
                ]).filtered(lambda b:\
                    # and has a next target (should already be a function of
                    # the search but doesn't hurt)
                    b._find_next_target() \
                    # and has not already been forward ported
                    and Batch.search_count([('parent_id', '=', b.id)]) == 0
                )
                ported |= extant.prs.filtered(lambda p: p._find_next_target())
                # enqueue a forward port as if the now deactivated branch had
                # been skipped over (which is the normal fw behaviour)
                for b in extant.with_context(force_fw=True):
                    # otherwise enqueue a followup
                    p._schedule_fp_followup()
                    b._schedule_fp_followup()

        if not ported:
            return

        for feedback in self.env['runbot_merge.pull_requests.feedback'].search(expression.OR(
            [('repository', '=', p.repository.id), ('pull_request', '=', p.number)]
            for p in ported
        )):
            # FIXME: better signal
            if 'disabled' in feedback.message:
                feedback.message += '\n\nAs this was not its limit, it will automatically be forward ported to the next active branch.'

    def _insert_intermediate_prs(self, branches_before):
        """If new branches have been added to the sequence inbetween existing
@@ -207,21 +157,10 @@
            # the parents linked list, so it has a special type
            for _, cs in groupby(candidates, key=lambda p: p.label):
                self.env['forwardport.batches'].create({
                    'batch_id': self.env['runbot_merge.batch'].create({
                        'target': before[-1].id,
                        'prs': [(4, c.id, 0) for c in cs],
                        'active': False,
                    }).id,
                    'batch_id': cs[0].batch_id.id,
                    'source': 'insert',
                })

    def _forward_port_ordered(self, domain=()):
        Branches = self.env['runbot_merge.branch']
        return Branches.search(expression.AND([
            [('project_id', '=', self.id)],
            domain or [],
        ]), order=reverse_order(Branches._order))

class Repository(models.Model):
    _inherit = 'runbot_merge.repository'
@@ -243,62 +182,7 @@ class PullRequests(models.Model):
    head: str
    state: str

    statuses = fields.Text(recursive=True)

    limit_id = fields.Many2one('runbot_merge.branch', help="Up to which branch should this PR be forward-ported")

    parent_id = fields.Many2one(
        'runbot_merge.pull_requests', index=True,
        help="a PR with a parent is an automatic forward port"
    )
    root_id = fields.Many2one('runbot_merge.pull_requests', compute='_compute_root', recursive=True)
    source_id = fields.Many2one('runbot_merge.pull_requests', index=True, help="the original source of this FP even if parents were detached along the way")
    forwardport_ids = fields.One2many('runbot_merge.pull_requests', 'source_id')
    reminder_backoff_factor = fields.Integer(default=-4, group_operator=None)
    merge_date = fields.Datetime()

    detach_reason = fields.Char()

    fw_policy = fields.Selection([
        ('ci', "Normal"),
        ('skipci', "Skip CI"),
        # ('skipmerge', "Skip merge"),
    ], required=True, default="ci")

    _sql_constraints = [(
        'fw_constraint',
        'check(source_id is null or num_nonnulls(parent_id, detach_reason) = 1)',
        "fw PRs must either be attached or have a reason for being detached",
    )]

    refname = fields.Char(compute='_compute_refname')
    @api.depends('label')
    def _compute_refname(self):
        for pr in self:
            pr.refname = pr.label.split(':', 1)[-1]

    ping = fields.Char(recursive=True)

    @api.depends('source_id.author.github_login', 'source_id.reviewed_by.github_login')
    def _compute_ping(self):
        """For forward-port PRs (PRs with a source) the author is the PR bot, so
        we want to ignore that and use the author & reviewer of the original PR
        """
        source = self.source_id
        if not source:
            return super()._compute_ping()

        for pr in self:
            s = ' '.join(
                f'@{p.github_login}'
                for p in source.author | source.reviewed_by | self.reviewed_by
            )
            pr.ping = s and (s + ' ')

    @api.depends('parent_id.root_id')
    def _compute_root(self):
        for p in self:
            p.root_id = reduce(lambda _, p: p, self._iter_ancestors())

    @api.model_create_single
    def create(self, vals):
@@ -310,17 +194,21 @@
        if existing:
            return existing

        if 'limit_id' not in vals:
            branch = self.env['runbot_merge.branch'].browse(vals['target'])
            repo = self.env['runbot_merge.repository'].browse(vals['repository'])
            vals['limit_id'] = branch.project_id._forward_port_ordered(
                ast.literal_eval(repo.branch_filter or '[]')
            )[-1].id
        if vals.get('parent_id') and 'source_id' not in vals:
            vals['source_id'] = self.browse(vals['parent_id']).root_id.id
        if vals.get('state') == 'merged':
            vals['merge_date'] = fields.Datetime.now()
        return super().create(vals)
        pr = super().create(vals)

        # added a new PR to an already forward-ported batch: port the PR
        if self.env['runbot_merge.batch'].search_count([
            ('parent_id', '=', pr.batch_id.id),
        ]):
            self.env['forwardport.batches'].create({
                'batch_id': pr.batch_id.id,
                'source': 'complete',
                'pr_id': pr.id,
            })

        return pr
@@ -351,8 +239,6 @@
    def write(self, vals):
        # if the PR's head is updated, detach (should split off the FP lines as this is not the original code)
        if vals.get('parent_id') and 'source_id' not in vals:
            parent = self.browse(vals['parent_id'])
            vals['source_id'] = (parent.source_id or parent).id
        if vals.get('state') == 'merged':
            vals['merge_date'] = fields.Datetime.now()
        r = super().write(vals)
        if self.env.context.get('forwardport_detach_warn', True):
            for p, parent in with_parents.items():
@@ -371,24 +257,6 @@
                    token_field='fp_github_token',
                    format_args={'pr': parent, 'child': p},
                )
            for p in closed_fp.filtered(lambda p: p.state != 'closed'):
                self.env.ref('runbot_merge.forwardport.reopen.detached')._send(
                    repository=p.repository,
                    pull_request=p.number,
                    token_field='fp_github_token',
                    format_args={'pr': p},
                )
        if vals.get('state') == 'merged':
            self.env['forwardport.branch_remover'].create([
                {'pr_id': p.id}
                for p in self
            ])
        # if we change the policy to skip CI, schedule followups on existing FPs
        if vals.get('fw_policy') == 'skipci' and self.state == 'merged':
            self.env['runbot_merge.pull_requests'].search([
                ('source_id', '=', self.id),
                ('state', 'not in', ('closed', 'merged')),
            ])._schedule_fp_followup()
        return r

    def _try_closing(self, by):
@@ -404,182 +272,6 @@
            })
        return r

    def _parse_commands(self, author, comment, login):
        super(PullRequests, self.with_context(without_forward_port=True))._parse_commands(author, comment, login)

        tokens = [
            token
            for line in re.findall('^\s*[@|#]?{}:? (.*)$'.format(self.repository.project_id.fp_github_name), comment['body'] or '', re.MULTILINE | re.IGNORECASE)
            for token in line.split()
        ]
        if not tokens:
            _logger.info("found no commands in comment of %s (%s) (%s)", author.github_login, author.display_name,
                utils.shorten(comment['body'] or '', 50)
            )
            return

        # TODO: don't use a mutable tokens iterator
        tokens = iter(tokens)
        while True:
            token = next(tokens, None)
            if token is None:
                break

            ping = False
            close = False
            msg = None
            if token in ('ci', 'skipci'):
                pr = (self.source_id or self)
                if pr._pr_acl(author).is_reviewer:
                    pr.fw_policy = token
                    msg = "Not waiting for CI to create followup forward-ports." if token == 'skipci' else "Waiting for CI to create followup forward-ports."
                else:
                    ping = True
                    msg = "you can't configure ci."

            if token == 'ignore': # replace 'ignore' by 'up to <pr_branch>'
                token = 'up'
                tokens = itertools.chain(['to', self.target.name], tokens)

            if token in ('r+', 'review+'):
                if not self.source_id:
                    ping = True
                    msg = "I can only do this on forward-port PRs and this is not one, see {}.".format(
                        self.repository.project_id.github_prefix
                    )
                elif not self.parent_id:
                    ping = True
                    msg = "I can only do this on unmodified forward-port PRs, ask {}.".format(
                        self.repository.project_id.github_prefix
                    )
                else:
                    merge_bot = self.repository.project_id.github_prefix
                    # don't update the root ever
                    for pr in (p for p in self._iter_ancestors() if p.parent_id if p.state in RPLUS):
                        # only the author is delegated explicitely on the
                        pr._parse_commands(author, {**comment, 'body': merge_bot + ' r+'}, login)
            elif token == 'close':
                if self.source_id._pr_acl(author).is_reviewer:
                    close = True
                else:
                    ping = True
                    msg = "you can't close PRs."

            elif token == 'up' and next(tokens, None) == 'to':
                limit = next(tokens, None)
                ping = True
                if not self._pr_acl(author).is_author:
                    msg = "you can't set a forward-port limit."
                elif not limit:
                    msg = "please provide a branch to forward-port to."
                else:
                    ping, msg = self._maybe_update_limit(limit)

            if msg or close:
                if msg:
                    _logger.info("%s [%s]: %s", self.display_name, login, msg)
                else:
                    _logger.info("%s [%s]: closing", self.display_name, login)
                self.env['runbot_merge.pull_requests.feedback'].create({
                    'repository': self.repository.id,
                    'pull_request': self.number,
                    'message': f'@{author.github_login} {msg}' if msg and ping else msg,
                    'close': close,
                    'token_field': 'fp_github_token',
                })

    def _maybe_update_limit(self, limit: str) -> typing.Tuple[bool, str]:
        limit_id = self.env['runbot_merge.branch'].with_context(active_test=False).search([
            ('project_id', '=', self.repository.project_id.id),
            ('name', '=', limit),
        ])
        if not limit_id:
            return True, f"there is no branch {limit!r}, it can't be used as a forward port target."

        if limit_id != self.target and not limit_id.active:
            return True, f"branch {limit_id.name!r} is disabled, it can't be used as a forward port target."

        # not forward ported yet, just acknowledge the request
        if not self.source_id and self.state != 'merged':
            self.limit_id = limit_id
            if branch_key(limit_id) <= branch_key(self.target):
                return False, "Forward-port disabled."
            else:
                return False, f"Forward-porting to {limit_id.name!r}."

        # if the PR has been forwardported
        prs = (self | self.forwardport_ids | self.source_id | self.source_id.forwardport_ids)
        tip = max(prs, key=pr_key)
        # if the fp tip was closed it's fine
        if tip.state == 'closed':
            return True, f"{tip.display_name} is closed, no forward porting is going on"

        prs.limit_id = limit_id

        real_limit = max(limit_id, tip.target, key=branch_key)

        addendum = ''
        # check if tip was queued for forward porting, try to cancel if we're
        # supposed to stop here
        if real_limit == tip.target and (task := self.env['forwardport.batches'].search([('batch_id', 'in', tip.batch_ids.ids)])):
            try:
                with self.env.cr.savepoint():
                    self.env.cr.execute(
                        "SELECT FROM forwardport_batches "
                        "WHERE id = %s FOR UPDATE NOWAIT",
                        [task.id])
            except psycopg2.errors.LockNotAvailable:
                # row locked = port occurring and probably going to succeed,
                # so next(real_limit) likely a done deal already
                return True, (
                    f"Forward port of {tip.display_name} likely already "
                    f"ongoing, unable to cancel, close next forward port "
                    f"when it completes.")
            else:
                self.env.cr.execute("DELETE FROM forwardport_batches WHERE id = %s", [task.id])

        if real_limit != tip.target:
            # forward porting was previously stopped at tip, and we want it to
            # resume
            if tip.state == 'merged':
                self.env['forwardport.batches'].create({
                    'batch_id': tip.batch_ids.sorted('id')[-1].id,
                    'source': 'fp' if tip.parent_id else 'merge',
                })
                resumed = tip
            else:
                # reactivate batch
                tip.batch_ids.sorted('id')[-1].active = True
                resumed = tip._schedule_fp_followup()
            if resumed:
                addendum += f', resuming forward-port stopped at {tip.display_name}'

        if real_limit != limit_id:
            addendum += f' (instead of the requested {limit_id.name!r} because {tip.display_name} already exists)'

        # get a "stable" root rather than self's to avoid divertences between
        # PRs across a root divide (where one post-root would point to the root,
        # and one pre-root would point to the source, or a previous root)
        root = tip.root_id
        # reference the root being forward ported unless we are the root
        root_ref = '' if root == self else f' {root.display_name}'
        msg = f"Forward-porting{root_ref} to {real_limit.name!r}{addendum}."
        # send a message to the source & root except for self, if they exist
        root_msg = f'Forward-porting to {real_limit.name!r} (from {self.display_name}).'
        self.env['runbot_merge.pull_requests.feedback'].create([
            {
                'repository': p.repository.id,
                'pull_request': p.number,
                'message': root_msg,
                'token_field': 'fp_github_token',
            }
            # send messages to source and root unless root is self (as it
            # already gets the normal message)
            for p in (self.source_id | root) - self
        ])

        return False, msg

    def _notify_ci_failed(self, ci):
        # only care about FP PRs which are not staged / merged yet
        # NB: probably ignore approved PRs as normal message will handle them?
@@ -595,86 +287,9 @@

    def _validate(self, statuses):
        failed = super()._validate(statuses)
        self._schedule_fp_followup()
        self.batch_id._schedule_fp_followup()
        return failed

    def _schedule_fp_followup(self):
        _logger = logging.getLogger(__name__).getChild('forwardport.next')
        # if the PR has a parent and is CI-validated, enqueue the next PR
        scheduled = self.browse(())
        for pr in self:
            _logger.info('Checking if forward-port %s (%s)', pr.display_name, pr)
            if not pr.parent_id:
                _logger.info('-> no parent %s (%s)', pr.display_name, pr.parent_id)
                continue
            if not self.env.context.get('force_fw') and self.source_id.fw_policy != 'skipci' and pr.state not in ['validated', 'ready']:
                _logger.info('-> wrong state %s (%s)', pr.display_name, pr.state)
                continue

            # check if we've already forward-ported this branch:
            # it has a batch without a staging
            batch = self.env['runbot_merge.batch'].with_context(active_test=False).search([
                ('staging_id', '=', False),
                ('prs', 'in', pr.id),
            ], limit=1)
            # if the batch is inactive, the forward-port has been done *or*
            # the PR's own forward port is in error, so bail
            if not batch.active:
                _logger.info('-> forward port done or in error (%s.active=%s)', batch, batch.active)
                continue

            # otherwise check if we already have a pending forward port
            _logger.info("%s %s %s", pr.display_name, batch, ', '.join(batch.mapped('prs.display_name')))
            if self.env['forwardport.batches'].search_count([('batch_id', '=', batch.id)]):
                _logger.warning('-> already recorded')
                continue

            # check if batch-mate are all valid
            mates = batch.prs
            # wait until all of them are validated or ready
            if not self.env.context.get('force_fw') and any(pr.source_id.fw_policy != 'skipci' and pr.state not in ('validated', 'ready') for pr in mates):
                _logger.info("-> not ready (%s)", [(pr.display_name, pr.state) for pr in mates])
                continue

            # check that there's no weird-ass state
            if not all(pr.parent_id for pr in mates):
                _logger.warning("Found a batch (%s) with only some PRs having parents, ignoring", mates)
                continue
            if self.search_count([('parent_id', 'in', mates.ids)]):
                _logger.warning("Found a batch (%s) with only some of the PRs having children", mates)
                continue

            _logger.info('-> ok')
            self.env['forwardport.batches'].create({
                'batch_id': batch.id,
                'source': 'fp',
            })
            scheduled |= pr
        return scheduled

    def _find_next_target(self, reference):
        """ Finds the branch between target and limit_id which follows
        reference
        """
        if reference.target == self.limit_id:
            return
        # NOTE: assumes even disabled branches are properly sequenced, would
        # probably be a good idea to have the FP view show all branches
        branches = list(self.target.project_id
            .with_context(active_test=False)
            ._forward_port_ordered(ast.literal_eval(self.repository.branch_filter or '[]')))

        # get all branches between max(root.target, ref.target) (excluded) and limit (included)
        from_ = max(branches.index(self.target), branches.index(reference.target))
        to_ = branches.index(self.limit_id)

        # return the first active branch in the set
        return next((
            branch
            for branch in branches[from_+1:to_+1]
            if branch.active
        ), None)

    def _commits_lazy(self):
        s = requests.Session()
        s.headers['Authorization'] = 'token %s' % self.repository.project_id.fp_github_token
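Aside (editor's illustration, not part of the commit): `_find_next_target` above slices the project's ordered branch list between the current target (excluded) and the limit (included), then returns the first *active* branch, so disabled branches are skipped over. The same selection over plain tuples:

```python
# branches ordered oldest -> newest as (name, active) pairs
def find_next_target(branches, current, limit):
    names = [name for name, _ in branches]
    from_ = names.index(current)
    to_ = names.index(limit)
    return next(
        (name for name, active in branches[from_ + 1:to_ + 1] if active),
        None,
    )

# with "b" disabled, a PR on "a" limited to "c" ports straight to "c"
assert find_next_target([('a', True), ('b', False), ('c', True)], 'a', 'c') == 'c'
# a PR already at its limit has no next target
assert find_next_target([('a', True), ('b', False), ('c', True)], 'c', 'c') is None
```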
@@ -703,249 +318,6 @@
            }
        return sorted(commits, key=lambda c: idx[c['sha']])

    def _iter_ancestors(self):
        while self:
            yield self
            self = self.parent_id

    def _iter_descendants(self):
        pr = self
        while pr := self.search([('parent_id', '=', pr.id)]):
            yield pr

    @api.depends('parent_id.statuses')
    def _compute_statuses(self):
        super()._compute_statuses()

    def _get_overrides(self):
        # NB: assumes _get_overrides always returns an "owned" dict which we can modify
        p = self.parent_id._get_overrides() if self.parent_id else {}
        p.update(super()._get_overrides())
        return p

    def _port_forward(self):
        if not self:
            return

        all_sources = [(p.source_id or p) for p in self]
        all_targets = [s._find_next_target(p) for s, p in zip(all_sources, self)]

        ref = self[0]
        base = all_sources[0]
        target = all_targets[0]
        if target is None:
            _logger.info(
                "Will not forward-port %s: no next target",
                ref.display_name,
            )
            return  # QUESTION: do the prs need to be updated?

        # check if the PRs have already been forward-ported: is there a PR
        # with the same source targeting the next branch in the series
        for source in all_sources:
            if self.search_count([('source_id', '=', source.id), ('target', '=', target.id)]):
                _logger.info("Will not forward-port %s: already ported", ref.display_name)
                return

        # check if all PRs in the batch have the same "next target" , bail if
        # that's not the case as it doesn't make sense for forward one PR from
        # a to b and a linked pr from a to c
        different_target = next((t for t in all_targets if t != target), None)
        if different_target:
            different_pr = next(p for p, t in zip(self, all_targets) if t == different_target)
            for pr, t in zip(self, all_targets):
                linked, other = different_pr, different_target
                if t != target:
                    linked, other = ref, target
                self.env.ref('runbot_merge.forwardport.failure.discrepancy')._send(
                    repository=pr.repository,
                    pull_request=pr.number,
                    token_field='fp_github_token',
                    format_args={'pr': pr, 'linked': linked, 'next': t.name, 'other': other.name},
                )
            _logger.warning(
                "Cancelling forward-port of %s: found different next branches (%s)",
                self, all_targets
            )
            return

        proj = self.mapped('target.project_id')
        if not proj.fp_github_token:
            _logger.warning(
                "Can not forward-port %s: no token on project %s",
                ref.display_name,
                proj.name
            )
            return

        notarget = [p.repository.name for p in self if not p.repository.fp_remote_target]
        if notarget:
            _logger.error(
                "Can not forward-port %s: repos %s don't have a remote configured",
                self, ', '.join(notarget)
            )
            return

        # take only the branch bit
        new_branch = '%s-%s-%s-fw' % (
            target.name,
            base.refname,
            # avoid collisions between fp branches (labels can be reused
            # or conflict especially as we're chopping off the owner)
            base64.urlsafe_b64encode(os.urandom(3)).decode()
        )
        # TODO: send outputs to logging?
        conflicts = {}
        with contextlib.ExitStack() as s:
            for pr in self:
                conflicts[pr], working_copy = pr._create_fp_branch(
                    target, new_branch, s)

                working_copy.push('target', new_branch)

        gh = requests.Session()
        gh.headers['Authorization'] = 'token %s' % proj.fp_github_token
        has_conflicts = any(conflicts.values())
        # problemo: this should forward port a batch at a time, if porting
        # one of the PRs in the batch fails is huge problem, though this loop
        # only concerns itself with the creation of the followup objects so...
        new_batch = self.browse(())
        self.env.cr.execute('LOCK runbot_merge_pull_requests IN SHARE MODE')
        for pr in self:
            owner, _ = pr.repository.fp_remote_target.split('/', 1)
            source = pr.source_id or pr
            root = pr.root_id

            message = source.message + '\n\n' + '\n'.join(
                "Forward-Port-Of: %s" % p.display_name
                for p in root | source
            )

            title, body = re.match(r'(?P<title>[^\n]+)\n*(?P<body>.*)', message, flags=re.DOTALL).groups()
            r = gh.post(f'https://api.github.com/repos/{pr.repository.name}/pulls', json={
                'base': target.name,
                'head': f'{owner}:{new_branch}',
                'title': '[FW]' + (' ' if title[0] != '[' else '') + title,
                'body': body
            })
            if not r.ok:
                _logger.warning("Failed to create forward-port PR for %s, deleting branches", pr.display_name)
                # delete all the branches this should automatically close the
                # PRs if we've created any. Using the API here is probably
                # simpler than going through the working copies
                for repo in self.mapped('repository'):
                    d = gh.delete(f'https://api.github.com/repos/{repo.fp_remote_target}/git/refs/heads/{new_branch}')
                    if d.ok:
                        _logger.info("Deleting %s:%s=success", repo.fp_remote_target, new_branch)
                    else:
                        _logger.warning("Deleting %s:%s=%s", repo.fp_remote_target, new_branch, d.text)
                raise RuntimeError("Forwardport failure: %s (%s)" % (pr.display_name, r.text))

            new_pr = self._from_gh(r.json())
            _logger.info("Created forward-port PR %s", new_pr)
            new_batch |= new_pr

            # allows PR author to close or skipci
            source.delegates |= source.author
            new_pr.write({
                'merge_method': pr.merge_method,
                'source_id': source.id,
                # only link to previous PR of sequence if cherrypick passed
                'parent_id': pr.id if not has_conflicts else False,
                'detach_reason': "conflicts: {}".format(
                    f'\n{conflicts[pr]}\n{conflicts[pr]}'.strip()
                ) if has_conflicts else None,
                # Copy author & delegates of source as well as delegates of
                # previous so they can r+ the new forward ports.
                'delegates': [(6, False, (source.delegates | pr.delegates).ids)]
            })
            if has_conflicts and pr.parent_id and pr.state not in ('merged', 'closed'):
                self.env.ref('runbot_merge.forwardport.failure.conflict')._send(
                    repository=pr.repository,
                    pull_request=pr.number,
                    token_field='fp_github_token',
                    format_args={'source': source, 'pr': pr, 'new': new_pr, 'footer': footer},
                )

        for pr, new_pr in zip(self, new_batch):
            (h, out, err, hh) = conflicts.get(pr) or (None, None, None, None)

            if h:
                sout = serr = ''
                if out.strip():
                    sout = f"\nstdout:\n```\n{out}\n```\n"
                if err.strip():
                    serr = f"\nstderr:\n```\n{err}\n```\n"

                lines = ''
                if len(hh) > 1:
                    lines = '\n' + ''.join(
                        '* %s%s\n' % (sha, ' <- on this commit' if sha == h else '')
                        for sha in hh
                    )
                template = 'runbot_merge.forwardport.failure'
                format_args = {
                    'pr': new_pr,
                    'commits': lines,
                    'stdout': sout,
                    'stderr': serr,
                    'footer': footer,
                }
            elif has_conflicts:
                template = 'runbot_merge.forwardport.linked'
                format_args = {
                    'pr': new_pr,
                    'siblings': ', '.join(p.display_name for p in (new_batch - new_pr)),
                    'footer': footer,
                }
            elif base._find_next_target(new_pr) is None:
                ancestors = "".join(
                    "* %s\n" % p.display_name
                    for p in pr._iter_ancestors()
                    if p.parent_id
                )
                template = 'runbot_merge.forwardport.final'
                format_args = {
                    'pr': new_pr,
                    'containing': ' containing:' if ancestors else '.',
                    'ancestors': ancestors,
                    'footer': footer,
                }
            else:
                template = 'runbot_merge.forwardport.intermediate'
                format_args = {
                    'pr': new_pr,
                    'footer': footer,
                }
            self.env.ref(template)._send(
                repository=new_pr.repository,
                pull_request=new_pr.number,
                token_field='fp_github_token',
                format_args=format_args,
            )

            labels = ['forwardport']
            if has_conflicts:
                labels.append('conflict')
            self.env['runbot_merge.pull_requests.tagging'].create({
                'repository': new_pr.repository.id,
                'pull_request': new_pr.number,
                'tags_add': labels,
            })

        # batch the PRs so _validate can perform the followup FP properly
        # (with the entire batch). If there are conflict then create a
        # deactivated batch so the interface is coherent but we don't pickup
        # an active batch we're never going to deactivate.
        b = self.env['runbot_merge.batch'].create({
            'target': target.id,
            'prs': [(6, 0, new_batch.ids)],
            'active': not has_conflicts,
        })
        # if we're not waiting for CI, schedule followup immediately
        if any(p.source_id.fw_policy == 'skipci' for p in b.prs):
            b.prs[0]._schedule_fp_followup()
        return b

    def _create_fp_branch(self, target_branch, fp_branch_name, cleanup):
        """ Creates a forward-port for the current PR to ``target_branch`` under
@@ -1002,7 +374,7 @@
        # add target remote
        working_copy.remote(
            'add', 'target',
            'https://{p.fp_github_name}:{p.fp_github_token}@github.com/{r.fp_remote_target}'.format(
            'https://{p.fp_github_token}@github.com/{r.fp_remote_target}'.format(
                r=self.repository,
                p=project_id
            )
@@ -1216,19 +588,6 @@ stderr:
            }
        )


# ordering is a bit unintuitive because the lowest sequence (and name)
# is the last link of the fp chain, reasoning is a bit more natural the
# other way around (highest object is the last), especially with Python
# not really having lazy sorts in the stdlib
def branch_key(b: Branch, /, _key=itemgetter('sequence', 'name')):
    return Reverse(_key(b))


def pr_key(p: PullRequests, /):
    return branch_key(p.target)


class Stagings(models.Model):
    _inherit = 'runbot_merge.stagings'
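Aside (editor's illustration, not part of the commit): `branch_key` above wraps the (sequence, name) key in `Reverse` so that `max()` selects the branch with the *lowest* sequence, i.e. the last link of the forward-port chain. A sketch with a minimal stand-in for `odoo.tools.misc.Reverse`:

```python
from functools import total_ordering
from operator import itemgetter

@total_ordering
class Reverse:  # minimal stand-in for odoo.tools.misc.Reverse
    def __init__(self, val):
        self.val = val
    def __eq__(self, other):
        return self.val == other.val
    def __lt__(self, other):
        return self.val > other.val  # inverted comparison

_key = itemgetter('sequence', 'name')
def branch_key(b):
    return Reverse(_key(b))

branches = [
    {'sequence': 10, 'name': 'master'},  # lowest sequence sorts "highest"
    {'sequence': 20, 'name': '17.0'},
    {'sequence': 30, 'name': '16.0'},
]
assert max(branches, key=branch_key)['name'] == 'master'
```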
@@ -1236,7 +595,7 @@
        r = super().write(vals)
        # we've just deactivated a successful staging (so it got ~merged)
        if vals.get('active') is False and self.state == 'success':
            # check al batches to see if they should be forward ported
            # check all batches to see if they should be forward ported
            for b in self.with_context(active_test=False).batch_ids:
                # if all PRs of a batch have parents they're part of an FP
                # sequence and thus handled separately, otherwise they're
@@ -1,4 +1,6 @@
from utils import Commit, make_basic
import re

from utils import Commit, make_basic, to_pr, seen, re_matches


def test_single_updated(env, config, make_repo):
@ -87,3 +89,318 @@ def test_single_updated(env, config, make_repo):
|
||||
|
||||
assert pr22_id.source_id == pr2_id
|
||||
assert pr22_id.parent_id == pr21_id
|
||||
|
||||
def test_closing_during_fp(env, config, make_repo, users):
|
||||
""" Closing a PR after it's been ported once should not port it further, but
|
||||
the rest of the batch should carry on
|
||||
"""
|
||||
r1, _ = make_basic(env, config, make_repo)
|
||||
r2, _ = make_basic(env, config, make_repo)
|
||||
env['runbot_merge.repository'].search([]).required_statuses = 'default'
|
||||
|
||||
with r1, r2:
|
||||
r1.make_commits('a', Commit('1', tree={'1': '0'}), ref='heads/aref')
|
||||
pr1 = r1.make_pr(target='a', head='aref')
|
||||
r1.post_status('aref', 'success')
|
||||
pr1.post_comment('hansen r+', config['role_reviewer']['token'])
|
||||
|
||||
r2.make_commits('a', Commit('2', tree={'2': '0'}), ref='heads/aref')
|
||||
pr2 = r2.make_pr(target='a', head='aref')
|
||||
r2.post_status('aref', 'success')
|
||||
pr2.post_comment('hansen r+', config['role_reviewer']['token'])
|
||||
env.run_crons()
|
||||
|
||||
with r1, r2:
|
||||
r1.post_status('staging.a', 'success')
|
||||
r2.post_status('staging.a', 'success')
|
||||
env.run_crons()
|
||||
|
||||
pr1_id = to_pr(env, pr1)
|
||||
[pr1_1_id] = pr1_id.forwardport_ids
|
||||
pr2_id = to_pr(env, pr2)
|
||||
[pr2_1_id] = pr2_id.forwardport_ids
|
||||
|
||||
with r1:
|
||||
r1.get_pr(pr1_1_id.number).close(config['role_user']['token'])
|
||||
|
||||
with r2:
|
||||
r2.post_status(pr2_1_id.head, 'success')
|
||||
env.run_crons()
|
||||
|
||||
assert env['runbot_merge.pull_requests'].search_count([]) == 5,\
|
||||
"only one of the forward ports should be ported"
|
||||
assert not env['runbot_merge.pull_requests'].search([('parent_id', '=', pr1_1_id.id)]),\
|
||||
"the closed PR should not be ported"
|
||||
assert env['runbot_merge.pull_requests'].search([('source_id', '=', pr1_id.id)]) == pr1_1_id,\
|
||||
"the closed PR should not be ported"
|
||||
|
||||
r1_b_head = r1.commit("b")
|
||||
with r2:
|
||||
r2.get_pr(pr2_1_id.number).post_comment('hansen r+', config['role_reviewer']['token'])
|
||||
env.run_crons()
|
||||
assert not pr2_1_id.blocked
|
||||
assert not pr2_1_id.batch_id.blocked
|
||||
st = pr2_1_id.staging_id
|
||||
assert st
|
||||
with r1, r2:
|
||||
r1.post_status('staging.b', 'success')
|
||||
r2.post_status('staging.b', 'success')
|
||||
env.run_crons()
|
||||
assert st.state == 'success'
|
||||
|
||||
assert r1_b_head.id == r1.commit("b").id, \
|
||||
"r1:b's head should not have been touched"
|
||||
|
||||
def test_add_pr_during_fp(env, config, make_repo, users):
|
||||
""" It should be possible to add new PRs to an FP batch
|
||||
"""
|
||||
r1, _ = make_basic(env, config, make_repo, statuses="default")
|
||||
r2, fork2 = make_basic(env, config, make_repo, statuses="default")
|
||||
# needs a "d" branch
|
||||
env['runbot_merge.project'].search([]).write({
|
||||
'branch_ids': [(0, 0, {'name': 'd', 'sequence': 40})],
|
||||
})
|
||||
with r1, r2:
|
||||
r1.make_ref("heads/d", r1.commit("c").id)
|
||||
r2.make_ref("heads/d", r2.commit("c").id)
|
||||
|
||||
with r1:
|
||||
r1.make_commits('a', Commit('1', tree={'1': '0'}), ref='heads/aref')
|
||||
pr1_a = r1.make_pr(target='a', head='aref')
|
||||
r1.post_status('aref', 'success')
|
||||
pr1_a.post_comment('hansen r+', config['role_reviewer']['token'])
|
||||
env.run_crons()
|
||||
|
||||
with r1, r2:
|
||||
r1.post_status('staging.a', 'success')
|
||||
r2.post_status('staging.a', 'success')
|
||||
env.run_crons()
|
||||
|
||||
pr1_a_id = to_pr(env, pr1_a)
|
||||
[pr1_b_id] = pr1_a_id.forwardport_ids
|
||||
|
||||
with r2, fork2:
|
||||
fork2.make_commits('b', Commit('2', tree={'2': '0'}), ref=f'heads/{pr1_b_id.refname}')
|
||||
pr2_b = r2.make_pr(title="B", target='b', head=f'{fork2.owner}:{pr1_b_id.refname}')
|
||||
env.run_crons()
|
||||
|
||||
pr2_b_id = to_pr(env, pr2_b)
|
||||
|
||||
assert not pr1_b_id.staging_id
|
||||
assert not pr2_b_id.staging_id
|
||||
assert pr1_b_id.batch_id == pr2_b_id.batch_id
|
||||
assert pr1_b_id.state == "opened",\
|
||||
"implicit approval from forward port should have been canceled"
|
||||
batch = pr2_b_id.batch_id
|
||||
|
||||
with r1:
|
||||
r1.post_status(pr1_b_id.head, 'success')
|
||||
r1.get_pr(pr1_b_id.number).post_comment('hansen r+', config['role_reviewer']['token'])
|
||||
env.run_crons()
|
||||
|
||||
assert batch.blocked
|
||||
assert pr1_b_id.blocked
|
||||
|
||||
with r2:
|
||||
r2.post_status(pr2_b.head, "success")
|
||||
pr2_b.post_comment("hansen r+", config['role_reviewer']['token'])
|
||||
env.run_crons()
|
||||
|
||||
assert not batch.blocked
|
||||
assert pr1_b_id.staging_id and pr1_b_id.staging_id == pr2_b_id.staging_id
|
||||
|
||||
with r1, r2:
|
||||
r1.post_status('staging.b', 'success')
|
||||
r2.post_status('staging.b', 'success')
|
||||
env.run_crons()
|
||||
|
||||
def find_child(pr):
|
||||
return env['runbot_merge.pull_requests'].search([
|
||||
('parent_id', '=', pr.id),
|
||||
])
|
||||
pr1_c_id = find_child(pr1_b_id)
|
||||
assert pr1_c_id
|
||||
pr2_c_id = find_child(pr2_b_id)
|
||||
assert pr2_c_id
|
||||
|
||||
with r1, r2:
|
||||
r1.post_status(pr1_c_id.head, 'success')
|
||||
r2.post_status(pr2_c_id.head, 'success')
|
||||
env.run_crons()
|
||||
|
||||
assert find_child(pr1_c_id)
|
||||
assert find_child(pr2_c_id)
|
||||
|
||||
def test_add_to_forward_ported(env, config, make_repo, users):
|
||||
"""Add a new branch to an intermediate step of a fw *sequence*, either
|
||||
because skipci or because all the intermediate CI succeeded
|
||||
"""
|
||||
# region setup
|
||||
r1, _ = make_basic(env, config, make_repo, statuses="default")
|
||||
r2, fork2 = make_basic(env, config, make_repo, statuses="default")
|
||||
|
||||
with r1:
|
||||
r1.make_commits('a', Commit('a', tree={'a': 'a'}), ref="heads/pr1")
|
||||
pr1_a = r1.make_pr(target="a", head="pr1")
|
||||
r1.post_status(pr1_a.head, 'success')
|
||||
pr1_a.post_comment('hansen r+', config['role_reviewer']['token'])
|
||||
env.run_crons()
|
||||
with r1, r2:
|
||||
r1.post_status('staging.a', 'success')
|
||||
r2.post_status('staging.a', 'success')
|
||||
env.run_crons()
|
||||
|
||||
# region port forward
|
||||
pr1_a_id = to_pr(env, pr1_a)
|
||||
pr1_b_id = pr1_a_id.forwardport_ids
|
||||
assert pr1_b_id
|
||||
with r1:
|
||||
r1.post_status(pr1_b_id.head, 'success')
|
||||
env.run_crons()
|
||||
pr1_c_id = pr1_a_id.forwardport_ids - pr1_b_id
|
||||
assert pr1_c_id
|
||||
# endregion
|
||||
# endregion
|
||||
|
||||
# new PR must be in fork for labels to actually match
|
||||
with r2, fork2:
|
||||
# branch in fork has no owner prefix, but HEAD for cross-repo PR does
|
||||
fork2.make_commits("b", Commit('b', tree={'b': 'b'}), ref=f'heads/{pr1_b_id.refname}')
|
||||
pr2_b = r2.make_pr(title="b", target="b", head=pr1_b_id.label)
|
||||
r2.post_status(pr2_b.head, 'success')
|
||||
env.run_crons()
|
||||
|
||||
pr2_b_id = to_pr(env, pr2_b)
|
||||
assert pr2_b_id.batch_id == pr1_b_id.batch_id
|
||||
assert len(pr2_b_id.forwardport_ids) == 1, \
|
||||
"since the batch is already forward ported, the new PR should" \
|
||||
" immediately be forward ported to match"
|
||||
assert pr2_b_id.forwardport_ids.label == pr1_c_id.label
|
||||
|
||||
pr2_a = r1.get_pr(pr1_b_id.number)
|
||||
with r1, r2:
|
||||
pr2_a.post_comment('hansen r+', config['role_reviewer']['token'])
|
||||
pr2_b.post_comment("hansen r+", config['role_reviewer']['token'])
|
||||
env.run_crons()
|
||||
|
||||
with r1, r2:
|
||||
r1.post_status('staging.b', 'success')
|
||||
r2.post_status('staging.b', 'success')
|
||||
env.run_crons()
|
||||
|
||||
assert pr1_b_id.state == 'merged'
|
||||
assert pr2_b_id.state == 'merged'
|
||||
|
||||
assert len(pr2_b_id.forwardport_ids) == 1,\
|
||||
"verify that pr2_b did not get forward ported again on merge"
|
||||
pr2_c = r2.get_pr(pr2_b_id.forwardport_ids.number)
|
||||
assert pr2_c.comments == [
|
||||
seen(env, pr2_c, users),
|
||||
(users['user'], '''\
|
||||
@{user} this PR targets c and is the last of the forward-port chain.
|
||||
|
||||
To merge the full chain, use
|
||||
> @hansen r+
|
||||
|
||||
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
|
||||
'''.format_map(users)),
|
||||
]

def test_add_to_forward_port_conflict(env, config, make_repo, users):
    """If a PR is added to an existing forward port sequence, and it causes
    conflicts when forward ported, it should be treated similarly to an *update*
    causing a conflict: the PR is still created, but it's set in conflict.
    """
    # region setup
    r1, _ = make_basic(env, config, make_repo, statuses="default")
    r2, fork2 = make_basic(env, config, make_repo, statuses="default")
    project = env['runbot_merge.project'].search([])
    with r2:
        r2.make_commits(
            "c",
            Commit("C-onflict", tree={"b": "X"}),
            ref="heads/c"
        )

    with r1:
        r1.make_commits('a', Commit('a', tree={'a': 'a'}), ref="heads/pr1")
        pr1_a = r1.make_pr(target="a", head="pr1")
        r1.post_status(pr1_a.head, 'success')
        pr1_a.post_comment('hansen r+', config['role_reviewer']['token'])
    env.run_crons()
    with r1, r2:
        r1.post_status('staging.a', 'success')
        r2.post_status('staging.a', 'success')
    env.run_crons()

    # region port forward
    pr1_a_id = to_pr(env, pr1_a)
    pr1_b_id = pr1_a_id.forwardport_ids
    assert pr1_b_id
    with r1:
        r1.post_status(pr1_b_id.head, 'success')
    env.run_crons()
    pr1_c_id = pr1_a_id.forwardport_ids - pr1_b_id
    assert pr1_c_id
    # endregion
    # endregion

    # new PR must be in fork for labels to actually match
    with r2, fork2:
        # branch in fork has no owner prefix, but HEAD for cross-repo PR does
        fork2.make_commits("b", Commit('b', tree={'b': 'b'}), ref=f'heads/{pr1_b_id.refname}')
        pr2_b = r2.make_pr(title="b", target="b", head=pr1_b_id.label)
        r2.post_status(pr2_b.head, 'success')
    env.run_crons()

    pr2_b_id = to_pr(env, pr2_b)
    assert pr2_b_id.batch_id == pr1_b_id.batch_id
    pr2_c_id = pr2_b_id.forwardport_ids
    assert len(pr2_c_id) == 1, \
        "since the batch is already forward ported, the new PR should" \
        " immediately be forward ported to match"
    assert pr2_c_id.label == pr1_c_id.label
    assert not pr2_c_id.parent_id, "conflict -> should be detached"
    assert pr2_c_id.detach_reason

    pr2_a = r1.get_pr(pr1_b_id.number)
    with r1, r2:
        pr2_a.post_comment('hansen r+', config['role_reviewer']['token'])
        pr2_b.post_comment("hansen r+", config['role_reviewer']['token'])
    env.run_crons()

    with r1, r2:
        r1.post_status('staging.b', 'success')
        r2.post_status('staging.b', 'success')
    env.run_crons()

    assert pr1_b_id.state == 'merged'
    assert pr2_b_id.state == 'merged'

    pr2_c = r2.get_pr(pr2_c_id.number)
    assert pr2_c.comments == [
        seen(env, pr2_c, users),
        # should have conflicts
        (users['user'], re_matches(r"""@{user} cherrypicking of pull request {previous.display_name} failed\.

stdout:
```
Auto-merging b
CONFLICT \(add/add\): Merge conflict in b

```

stderr:
```
.*
```

Either perform the forward-port manually \(and push to this branch, proceeding as usual\) or close this PR \(maybe\?\)\.

In the former case, you may want to edit this PR message as well\.

:warning: after resolving this conflict, you will need to merge it via @{project.github_prefix}\.

More info at https://github\.com/odoo/odoo/wiki/Mergebot#forward-port
""".format(project=project, previous=pr2_b_id, **users), re.DOTALL))
    ]
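
    # Illustrative note (standard `re` behaviour, not part of the change):
    # `re.DOTALL` is what lets the `.*` placeholder above span the multi-line
    # stderr block, since by default `.` does not match newlines:
    #
    #     import re
    #     assert re.match(r"a.b", "a\nb") is None
    #     assert re.match(r"a.b", "a\nb", re.DOTALL)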

@@ -93,10 +93,6 @@ In the former case, you may want to edit this PR message as well\.

More info at https://github\.com/odoo/odoo/wiki/Mergebot#forward-port
''', re.DOTALL))
    ]
    with prod:
        prc.post_comment(f'@{project.fp_github_name} r+', config['role_reviewer']['token'])
    env.run_crons()
    assert prc_id.state == 'opened', "approving via fw should not work on a conflict"

    prb = prod.get_pr(prb_id.number)
    assert prb.comments == [
@@ -108,13 +104,12 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
        '''),
        (users['user'], """@%s @%s the next pull request (%s) is in conflict. \
You can merge the chain up to here by saying
> @%s r+
> @hansen r+

More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
""" % (
            users['user'], users['reviewer'],
            prc_id.display_name,
            project.fp_github_name
        ))
    ]

@@ -11,7 +11,6 @@ from utils import seen, Commit, make_basic, to_pr
])
def test_configure_fp_limit(env, config, make_repo, source, limit, count):
    prod, other = make_basic(env, config, make_repo)
    bot_name = env['runbot_merge.project'].search([]).fp_github_name
    with prod:
        [c] = prod.make_commits(
            source, Commit('c', tree={'f': 'g'}),
@@ -20,7 +19,7 @@ def test_configure_fp_limit(env, config, make_repo, source, limit, count):
        pr = prod.make_pr(target=source, head='branch')
        prod.post_status(c, 'success', 'legal/cla')
        prod.post_status(c, 'success', 'ci/runbot')
        pr.post_comment(f'hansen r+\n{bot_name} up to {limit}', config['role_reviewer']['token'])
        pr.post_comment(f'hansen r+ up to {limit}', config['role_reviewer']['token'])
    env.run_crons()
    with prod:
        prod.post_status(f'staging.{source}', 'success', 'legal/cla')
@@ -38,14 +37,13 @@ def test_ignore(env, config, make_repo):
    to target
    """
    prod, other = make_basic(env, config, make_repo)
    bot_name = env['runbot_merge.project'].search([]).fp_github_name
    branch_a = env['runbot_merge.branch'].search([('name', '=', 'a')])
    with prod:
        [c] = prod.make_commits('a', Commit('c', tree={'0': '0'}), ref='heads/mybranch')
        pr = prod.make_pr(target='a', head='mybranch')
        prod.post_status(c, 'success', 'legal/cla')
        prod.post_status(c, 'success', 'ci/runbot')
        pr.post_comment('hansen r+\n%s ignore' % bot_name, config['role_reviewer']['token'])
        pr.post_comment('hansen r+ ignore', config['role_reviewer']['token'])
    env.run_crons()
    pr_id = env['runbot_merge.pull_requests'].search([('number', '=', pr.number)])
    assert pr_id.limit_id == branch_a
@@ -67,13 +65,12 @@ def test_disable(env, config, make_repo, users):
    """
    prod, other = make_basic(env, config, make_repo)
    project = env['runbot_merge.project'].search([])
    bot_name = project.fp_github_name
    with prod:
        [c] = prod.make_commits('a', Commit('c 0', tree={'0': '0'}), ref='heads/branch0')
        pr = prod.make_pr(target='a', head='branch0')
        prod.post_status(c, 'success', 'legal/cla')
        prod.post_status(c, 'success', 'ci/runbot')
        pr.post_comment('hansen r+\n%s up to b' % bot_name, config['role_reviewer']['token'])
        pr.post_comment('hansen r+ up to b', config['role_reviewer']['token'])

        [c] = prod.make_commits('a', Commit('c 1', tree={'1': '1'}), ref='heads/branch1')
        pr = prod.make_pr(target='a', head='branch1')
@@ -94,30 +91,28 @@ def test_disable(env, config, make_repo, users):
    assert p.parent_id == _1
    assert p.target.name == 'c'

    project.fp_github_token = config['role_other']['token']
    bot_name = project.fp_github_name
    with prod:
        [c] = prod.make_commits('a', Commit('c 2', tree={'2': '2'}), ref='heads/branch2')
        pr = prod.make_pr(target='a', head='branch2')
        prod.post_status(c, 'success', 'legal/cla')
        prod.post_status(c, 'success', 'ci/runbot')
        pr.post_comment('hansen r+\n%s up to' % bot_name, config['role_reviewer']['token'])
        pr.post_comment('%s up to b' % bot_name, config['role_reviewer']['token'])
        pr.post_comment('%s up to foo' % bot_name, config['role_reviewer']['token'])
        pr.post_comment('%s up to c' % bot_name, config['role_reviewer']['token'])
        pr.post_comment('hansen r+ up to', config['role_reviewer']['token'])
        pr.post_comment('hansen up to b', config['role_reviewer']['token'])
        pr.post_comment('hansen up to foo', config['role_reviewer']['token'])
        pr.post_comment('hansen up to c', config['role_reviewer']['token'])
    env.run_crons()

    # use a set because git webhooks delays might lead to mis-ordered
    # responses and we don't care that much
    assert set(pr.comments) == {
        (users['reviewer'], "hansen r+\n%s up to" % bot_name),
        (users['other'], "@%s please provide a branch to forward-port to." % users['reviewer']),
        (users['reviewer'], "%s up to b" % bot_name),
        (users['other'], "@%s branch 'b' is disabled, it can't be used as a forward port target." % users['reviewer']),
        (users['reviewer'], "%s up to foo" % bot_name),
        (users['other'], "@%s there is no branch 'foo', it can't be used as a forward port target." % users['reviewer']),
        (users['reviewer'], "%s up to c" % bot_name),
        (users['other'], "Forward-porting to 'c'."),
        (users['reviewer'], "hansen r+ up to"),
        (users['user'], "@{reviewer} please provide a branch to forward-port to.".format_map(users)),
        (users['reviewer'], "hansen up to b"),
        (users['user'], "@{reviewer} branch 'b' is disabled, it can't be used as a forward port target.".format_map(users)),
        (users['reviewer'], "hansen up to foo"),
        (users['user'], "@{reviewer} there is no branch 'foo', it can't be used as a forward port target.".format_map(users)),
        (users['reviewer'], "hansen up to c"),
        (users['user'], "Forward-porting to 'c'."),
        seen(env, pr, users),
    }
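
    # Illustrative note (plain Python semantics, not part of the change): the
    # set comparison above deliberately ignores comment ordering, which webhook
    # delivery does not guarantee:
    #
    #     assert {('a', 'x'), ('b', 'y')} == {('b', 'y'), ('a', 'x')}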

@@ -127,7 +122,6 @@ def test_limit_after_merge(env, config, make_repo, users):
    reviewer = config['role_reviewer']['token']
    branch_b = env['runbot_merge.branch'].search([('name', '=', 'b')])
    branch_c = env['runbot_merge.branch'].search([('name', '=', 'c')])
    bot_name = env['runbot_merge.project'].search([]).fp_github_name
    with prod:
        [c] = prod.make_commits('a', Commit('c', tree={'0': '0'}), ref='heads/abranch')
        pr1 = prod.make_pr(target='a', head='abranch')
@@ -142,18 +136,18 @@ def test_limit_after_merge(env, config, make_repo, users):
    env.run_crons()

    p1, p2 = env['runbot_merge.pull_requests'].search([], order='number')
    assert p1.limit_id == p2.limit_id == branch_c, "check that limit is correctly set"
    assert p1.limit_id == p2.limit_id == env['runbot_merge.branch'].browse(())
    pr2 = prod.get_pr(p2.number)
    with prod:
        pr1.post_comment(bot_name + ' up to b', reviewer)
        pr2.post_comment(bot_name + ' up to b', reviewer)
        pr1.post_comment('hansen up to b', reviewer)
        pr2.post_comment('hansen up to b', reviewer)
    env.run_crons()

    assert p1.limit_id == p2.limit_id == branch_b
    assert pr1.comments == [
        (users['reviewer'], "hansen r+"),
        seen(env, pr1, users),
        (users['reviewer'], f'{bot_name} up to b'),
        (users['reviewer'], 'hansen up to b'),
        (users['user'], "Forward-porting to 'b'."),
        (users['user'], f"Forward-porting to 'b' (from {p2.display_name})."),
    ]
@@ -164,7 +158,7 @@ This PR targets b and is part of the forward-port chain. Further PRs will be cre

More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
"""),
        (users['reviewer'], f'{bot_name} up to b'),
        (users['reviewer'], 'hansen up to b'),
        (users['user'], f"Forward-porting {p1.display_name} to 'b'."),
    ]

@@ -181,16 +175,12 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
    assert p2.source_id == p1

    with prod:
        pr2.post_comment(f'{bot_name} up to c', reviewer)
        pr2.post_comment('hansen up to c', reviewer)
    env.run_crons()

    assert pr2.comments[4:] == [
        (users['user'], "@%s @%s this PR was modified / updated and has become a normal PR. "
                        "It should be merged the normal way (via @%s)" % (
            users['user'], users['reviewer'],
            p2.repository.project_id.github_prefix
        )),
        (users['reviewer'], f'{bot_name} up to c'),
        (users['user'], f"@{users['user']} @{users['reviewer']} this PR was modified / updated and has become a normal PR. It must be merged directly."),
        (users['reviewer'], 'hansen up to c'),
        (users['user'], "Forward-porting to 'c'."),
    ]
    with prod:
@@ -207,7 +197,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
    assert p3
    pr3 = prod.get_pr(p3.number)
    with prod:
        pr3.post_comment(f"{bot_name} up to c", reviewer)
        pr3.post_comment("hansen up to c", reviewer)
    env.run_crons()
    assert pr3.comments == [
        seen(env, pr3, users),
@@ -215,11 +205,11 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
@{users['user']} @{users['reviewer']} this PR targets c and is the last of the forward-port chain.

To merge the full chain, use
> @{p1.repository.project_id.fp_github_name} r+
> @hansen r+

More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
"""),
        (users['reviewer'], f"{bot_name} up to c"),
        (users['reviewer'], "hansen up to c"),
        (users['user'], f"Forward-porting {p2.display_name} to 'c'."),
    ]
    # 7 of previous check, plus r+
@@ -268,7 +258,7 @@ def test_post_merge(
    from_id = PRs.search(update_from(source.id))
    from_ = prod.get_pr(from_id.number)
    with prod:
        from_.post_comment(f'{project.fp_github_name} up to {limit}', reviewer)
        from_.post_comment(f'hansen up to {limit}', reviewer)
    env.run_crons()

    # there should always be a comment on the source and root indicating how
@@ -314,7 +304,7 @@ def test_resume_fw(env, post_merge, users, config, branches, mode):
    # fetch source PR
    [source] = PRs.search([('source_id', '=', False)])
    with prod:
        prod.get_pr(source.number).post_comment(f'{project.fp_github_name} up to 5', reviewer)
        prod.get_pr(source.number).post_comment('hansen up to 5', reviewer)
    # validate the forward ports for "child", "root", and "parent" so "current"
    # exists and we have one more target
    for branch in map(str, range(2, 5+1)):
@@ -336,12 +326,11 @@ def test_resume_fw(env, post_merge, users, config, branches, mode):
    numbers = range(5 if mode == 'mergetip' else 2, 5 + 1)
    with prod:
        for number in numbers:
            prod.get_pr(number).post_comment(f'{project.github_prefix} r+', reviewer)
            prod.get_pr(number).post_comment('hansen r+', reviewer)
    env.run_crons()
    with prod:
        for target in numbers:
            pr = PRs.search([('target.name', '=', str(target))])
            print(pr.display_name, pr.state, pr.staging_id)
            prod.post_status(f'staging.{target}', 'success')
    env.run_crons()
    for number in numbers:
@@ -349,7 +338,7 @@ def test_resume_fw(env, post_merge, users, config, branches, mode):

    from_ = prod.get_pr(source.number)
    with prod:
        from_.post_comment(f'{project.fp_github_name} up to 6', reviewer)
        from_.post_comment('hansen up to 6', reviewer)
    env.run_crons()

    if mode == 'failbump':
@@ -378,6 +367,7 @@ def setci(*, source, repo, target, status='success'):
    in ``repo``.
    """
    pr = source.search([('source_id', '=', source.id), ('target.name', '=', str(target))])
    assert pr, f"could not find forward port of {source.display_name} to {target}"
    with repo:
        repo.post_status(pr.head, status)
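
# Illustrative usage of the helper above (hypothetical values): validate the
# forward port of `source` targeting branch "3" in `prod`:
#
#     setci(source=source, repo=prod, target=3)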

@@ -419,7 +409,6 @@ def post_merge(env, config, users, make_repo, branches):
        'github_prefix': 'hansen',
        'fp_github_token': config['github']['token'],
        'fp_github_name': 'herbert',
        'fp_github_email': 'hb@example.com',
        'branch_ids': [
            (0, 0, {'name': str(i), 'sequence': 1000 - (i * 10)})
            for i in branches
@@ -439,7 +428,6 @@ def post_merge(env, config, users, make_repo, branches):
        'review_rights': [(0, 0, {'repository_id': proj.repo_ids.id, 'review': True})]
    })

    mbot = proj.github_prefix
    reviewer = config['role_reviewer']['token']
    # merge the source PR
    source_target = str(branches[0])
@@ -448,7 +436,7 @@ def post_merge(env, config, users, make_repo, branches):
        pr1 = prod.make_pr(target=source_target, head=c, title="a title")

        prod.post_status(c, 'success')
        pr1.post_comment(f'{mbot} r+', reviewer)
        pr1.post_comment('hansen r+', reviewer)
    env.run_crons()
    with prod:
        prod.post_status(f'staging.{source_target}', 'success')

@@ -12,39 +12,45 @@ def test_override_inherited(env, config, make_repo, users):
    """
    repo, other = make_basic(env, config, make_repo)
    project = env['runbot_merge.project'].search([])
    project.repo_ids.status_ids = [(5, 0, 0), (0, 0, {'context': 'default'})]
    env['res.partner'].search([('github_login', '=', users['reviewer'])])\
        .write({'override_rights': [(0, 0, {
            'repository_id': project.repo_ids.id,
            'context': 'ci/runbot',
            'context': 'default',
        })]})

    with repo:
        repo.make_commits('a', Commit('C', tree={'a': '0'}), ref='heads/change')
        repo.make_commits('a', Commit('pr 1', tree={'a': '0'}), ref='heads/change')
        pr = repo.make_pr(target='a', head='change')
        repo.post_status('change', 'success', 'legal/cla')
        pr.post_comment('hansen r+ override=ci/runbot', config['role_reviewer']['token'])
        pr.post_comment('hansen r+ override=default', config['role_reviewer']['token'])
    env.run_crons()

    original = env['runbot_merge.pull_requests'].search([('repository.name', '=', repo.name), ('number', '=', pr.number)])
    assert original.state == 'ready'
    assert not original.limit_id

    with repo:
        repo.post_status('staging.a', 'success', 'legal/cla')
        repo.post_status('staging.a', 'success', 'ci/runbot')
        repo.post_status('staging.a', 'success')
    env.run_crons()

    pr0_id, pr1_id = env['runbot_merge.pull_requests'].search([], order='number')
    pr0_id, pr1_id, pr2_id = env['runbot_merge.pull_requests'].search([], order='number')
    assert pr0_id == original
    assert pr1_id.parent_id, pr0_id
    assert pr0_id.target.name == 'a'

    with repo:
        repo.post_status(pr1_id.head, 'success', 'legal/cla')
    env.run_crons()
    assert pr1_id.parent_id == pr0_id
    assert pr1_id.number == 2
    assert pr1_id.target.name == 'b'
    assert pr1_id.state == 'validated'
    assert statuses(pr1_id) == {'ci/runbot': 'success', 'legal/cla': 'success'}
    assert statuses(pr1_id) == {'default': 'success'}

    assert pr2_id.parent_id == pr1_id
    assert pr2_id.target.name == 'c'
    assert pr2_id.state == 'validated'
    assert statuses(pr2_id) == {'default': 'success'}

    # now we edit the child PR
    pr_repo, pr_ref = repo.get_pr(pr1_id.number).branch
    pr1 = repo.get_pr(pr1_id.number)
    pr_repo, pr_ref = pr1.branch
    with pr_repo:
        pr_repo.make_commits(
            pr1_id.target.name,
@@ -56,6 +62,12 @@ def test_override_inherited(env, config, make_repo, users):
    assert pr1_id.state == 'opened'
    assert not pr1_id.parent_id
    assert statuses(pr1_id) == {}, "should not have any status left"
    assert statuses(pr2_id) == {}

    with repo:
        pr1.post_comment('hansen override=default', config['role_reviewer']['token'])
    assert statuses(pr1_id) == {'default': 'success'}
    assert statuses(pr2_id) == {'default': 'success'}

def test_override_combination(env, config, make_repo, users):
    """ A forwardport should inherit its parents' overrides, until it's edited.

@@ -161,20 +161,19 @@ def test_straightforward_flow(env, config, make_repo, users):
* %s

To merge the full chain, use
> @%s r+
> @hansen r+

More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
""" % (
            users['other'], users['reviewer'],
            pr1.display_name,
            project.fp_github_name
        )),
    ]
    with prod:
        prod.post_status(pr2.head, 'success', 'ci/runbot')
        prod.post_status(pr2.head, 'success', 'legal/cla')

        pr2_remote.post_comment('%s r+' % project.fp_github_name, config['role_reviewer']['token'])
        pr2_remote.post_comment('hansen r+', config['role_reviewer']['token'])

    env.run_crons()

@@ -317,7 +316,6 @@ def test_empty(env, config, make_repo, users):
    project = env['runbot_merge.project'].search([])
    project.write({
        'fp_github_name': False,
        'fp_github_email': False,
        'fp_github_token': config['role_other']['token'],
    })
    assert project.fp_github_name == users['other']
@@ -493,7 +491,7 @@ def test_access_rights(env, config, make_repo, users, author, reviewer, delegate
        prod.post_status(pr2.head, 'success', 'ci/runbot')
        prod.post_status(pr2.head, 'success', 'legal/cla')
        prod.get_pr(pr2.number).post_comment(
            '%s r+' % project.fp_github_name,
            'hansen r+',
            token=config['role_' + reviewer]['token']
        )
    env.run_crons()
@@ -587,10 +585,10 @@ def test_delegate_fw(env, config, make_repo, users):
        (users['user'], '''@{self_reviewer} @{reviewer} this PR targets c and is the last of the forward-port chain.

To merge the full chain, use
> @{bot} r+
> @hansen r+

More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
'''.format(bot=pr1_id.repository.project_id.fp_github_name, **users)),
'''.format_map(users)),
        (users['other'], 'hansen r+')
    ]

@@ -630,7 +628,7 @@ def test_redundant_approval(env, config, make_repo, users):
    with prod:
        pr1.post_comment('hansen r+', config['role_reviewer']['token'])
    with prod:
        pr2.post_comment(f'{project.fp_github_name} r+', config['role_reviewer']['token'])
        pr2.post_comment('hansen r+', config['role_reviewer']['token'])
    env.run_crons()

    assert pr1.comments == [
@@ -742,7 +740,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
    # ok main1 PRs
    with main1:
        validate_all([main1], [pr1c.head])
        main1.get_pr(pr1c.number).post_comment('%s r+' % project.fp_github_name, config['role_reviewer']['token'])
        main1.get_pr(pr1c.number).post_comment('hansen r+', config['role_reviewer']['token'])
    env.run_crons()

    # check that the main1 PRs are ready but blocked on the main2 PRs
@@ -754,7 +752,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
    # ok main2 PRs
    with main2:
        validate_all([main2], [pr2c.head])
        main2.get_pr(pr2c.number).post_comment('%s r+' % project.fp_github_name, config['role_reviewer']['token'])
        main2.get_pr(pr2c.number).post_comment('hansen r+', config['role_reviewer']['token'])
    env.run_crons()

    env['runbot_merge.stagings'].search([]).mapped('target.display_name')
@@ -862,27 +860,8 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
    with prod:
        pr1.open()
    assert pr1_id.state == 'validated'
    env.run_crons()
    assert pr1.comments[-1] == (
        users['user'],
        "@{} @{} this PR was closed then reopened. "
        "It should be merged the normal way (via @{})".format(
            users['user'],
            users['reviewer'],
            project.github_prefix,
        )
    )

    with prod:
        pr1.post_comment(f'{project.fp_github_name} r+', config['role_reviewer']['token'])
    env.run_crons()
    assert pr1.comments[-1] == (
        users['user'],
        "@{} I can only do this on unmodified forward-port PRs, ask {}.".format(
            users['reviewer'],
            project.github_prefix,
        ),
    )
    assert not pr1_id.parent_id
    assert not pr2_id.parent_id

    def test_close_disabled(self, env, make_repo, users, config):
        """ If an fwport's target is disabled and its branch is closed, it
@@ -937,7 +916,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
* {pr2_id.display_name}

To merge the full chain, use
> @herbert r+
> @hansen r+

More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
""".format(pr2_id=pr2_id, **users)),
@@ -1063,50 +1042,44 @@ class TestRecognizeCommands:
            ('number', '=', pr.number),
        ])

    # FIXME: remove / merge into mergebot tests
    def test_botname_casing(self, env, config, make_repo):
        """ Test that the botname is case-insensitive as people might write
        bot names capitalised or titlecased or uppercased or whatever
        """
        repo, pr, pr_id = self.make_pr(env, config, make_repo)
        assert pr_id.state == 'opened'
        botname = env['runbot_merge.project'].search([]).fp_github_name
        [a] = env['runbot_merge.branch'].search([
            ('name', '=', 'a')
        ])
        [c] = env['runbot_merge.branch'].search([
            ('name', '=', 'c')
        ])

        names = [
            botname,
            botname.upper(),
            botname.capitalize(),
            sPeNgBaB(botname),
            "hansen",
            "HANSEN",
            "Hansen",
            sPeNgBaB("hansen"),
        ]

        for n in names:
            assert pr_id.limit_id == c
            assert not pr_id.limit_id
            with repo:
                pr.post_comment('@%s up to a' % n, config['role_reviewer']['token'])
                pr.post_comment(f'@{n} up to a', config['role_reviewer']['token'])
            assert pr_id.limit_id == a
            # reset state
            pr_id.write({'limit_id': c.id})
            pr_id.limit_id = False

    # FIXME: remove / merge into mergebot tests
    @pytest.mark.parametrize('indent', ['', '\N{SPACE}', '\N{SPACE}'*4, '\N{TAB}'])
    def test_botname_indented(self, env, config, make_repo, indent):
        """ matching botname should ignore leading whitespaces
        """
        repo, pr, pr_id = self.make_pr(env, config, make_repo)
        assert pr_id.state == 'opened'
        botname = env['runbot_merge.project'].search([]).fp_github_name
        [a] = env['runbot_merge.branch'].search([
            ('name', '=', 'a')
        ])
        [c] = env['runbot_merge.branch'].search([
            ('name', '=', 'c')
        ])

        assert pr_id.limit_id == c
        assert not pr_id.limit_id
        with repo:
            pr.post_comment('%s@%s up to a' % (indent, botname), config['role_reviewer']['token'])
            pr.post_comment(f'{indent}@hansen up to a', config['role_reviewer']['token'])
        assert pr_id.limit_id == a

@@ -25,7 +25,7 @@ def test_update_pr(env, config, make_repo, users, merge_parent) -> None:
    })
    with prod:
        prod.make_commits('c', Commit('1111', tree={'i': 'a'}), ref='heads/d')


    with prod:
        [p_1] = prod.make_commits(
            'a',
@@ -108,15 +108,6 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port

    assert pr1_id.head == new_c != pr1_head, "the FP PR should be updated"
    assert not pr1_id.parent_id, "the FP PR should be detached from the original"
    assert pr1_remote.comments == [
        seen(env, pr1_remote, users),
        fp_intermediate, ci_warning, ci_warning,
        (users['user'], "@%s @%s this PR was modified / updated and has become a normal PR. "
                        "It should be merged the normal way (via @%s)" % (
            users['user'], users['reviewer'],
            pr1_id.repository.project_id.github_prefix
        )),
    ], "users should be warned that the PR has become non-FP"
    # NOTE: should the followup PR wait for pr1 CI or not?
    assert pr2_id.head != pr2_head
    assert pr2_id.parent_id == pr1_id, "the followup PR should still be linked"
@@ -132,7 +123,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
        'h': 'a',
        'x': '5'
    }, "the followup FP should also have the update"


    with prod:
        prod.post_status(pr2_id.head, 'success', 'ci/runbot')
        prod.post_status(pr2_id.head, 'success', 'legal/cla')
@@ -155,7 +146,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
    pr3_id.write({'parent_id': False, 'detach_reason': "testing"})
    # pump feedback messages
    env.run_crons()


    pr3 = prod.get_pr(pr3_id.number)
    assert pr3.comments == [
        seen(env, pr3, users),
@@ -164,14 +155,13 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
* {pr2_id.display_name}

To merge the full chain, use
> @{pr3_id.repository.project_id.fp_github_name} r+
> @hansen r+

More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
"""),
        (users['user'], f"@{users['user']} @{users['reviewer']} this PR was "
                        f"modified / updated and has become a normal PR. It "
                        f"should be merged the normal way "
                        f"(via @{pr3_id.repository.project_id.github_prefix})"
                        f"must be merged directly."
        )
    ]

@@ -197,7 +187,6 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
            f"won't cross."),
    ]


def test_update_merged(env, make_repo, config, users):
    """ Strange things happen when an FP gets closed / merged but then its
    parent is modified and the forwardport tries to update the (now merged)
@@ -322,7 +311,6 @@ def test_duplicate_fw(env, make_repo, setreviewers, config, users):
        'github_prefix': 'hansen',
        'fp_github_token': config['github']['token'],
        'fp_github_name': 'herbert',
        'fp_github_email': 'hb@example.com',
        'branch_ids': [
            (0, 0, {'name': 'master', 'sequence': 0}),
            (0, 0, {'name': 'v3', 'sequence': 1}),
@@ -377,7 +365,7 @@ def test_duplicate_fw(env, make_repo, setreviewers, config, users):
    with repo:
        repo.make_commits('v2', Commit('c0', tree={'z': 'b'}), ref=prv2.ref, make=False)
    env.run_crons()
    assert pr_ids.mapped('state') == ['merged', 'opened', 'validated', 'validated']
    assert pr_ids.mapped('state') == ['merged', 'opened', 'opened', 'opened']
    assert repo.read_tree(repo.commit(prv2_id.head)) == {'f': 'c', 'h': 'a', 'z': 'b'}
    assert repo.read_tree(repo.commit(prv3_id.head)) == {'f': 'd', 'i': 'a', 'z': 'b'}
    assert repo.read_tree(repo.commit(prmaster_id.head)) == {'f': 'e', 'z': 'b'}

@@ -3,85 +3,15 @@ from datetime import datetime

import pytest

from utils import seen, Commit, to_pr
from utils import seen, Commit, to_pr, make_basic


def make_basic(env, config, make_repo, *, fp_token, fp_remote):
    """ Creates a basic repo with 3 forking branches

    0 -- 1 -- 2 -- 3 -- 4  : a
              |
              `-- 11 -- 22 : b
                   |
                   `-- 111 : c
    each branch just adds and modifies a file (resp. f, g and h) through the
    contents sequence a b c d e
    """
    Projects = env['runbot_merge.project']
    project = Projects.search([('name', '=', 'myproject')])
    if not project:
        project = Projects.create({
            'name': 'myproject',
            'github_token': config['github']['token'],
            'github_prefix': 'hansen',
            'fp_github_token': fp_token and config['github']['token'],
            'fp_github_name': 'herbert',
            'fp_github_email': 'hb@example.com',
            'branch_ids': [
                (0, 0, {'name': 'a', 'sequence': 2}),
                (0, 0, {'name': 'b', 'sequence': 1}),
                (0, 0, {'name': 'c', 'sequence': 0}),
            ],
        })

    prod = make_repo('proj')
    with prod:
        a_0, a_1, a_2, a_3, a_4, = prod.make_commits(
            None,
            Commit("0", tree={'f': 'a'}),
            Commit("1", tree={'f': 'b'}),
            Commit("2", tree={'f': 'c'}),
            Commit("3", tree={'f': 'd'}),
            Commit("4", tree={'f': 'e'}),
            ref='heads/a',
        )
        b_1, b_2 = prod.make_commits(
            a_2,
            Commit('11', tree={'g': 'a'}),
            Commit('22', tree={'g': 'b'}),
            ref='heads/b',
        )
        prod.make_commits(
            b_1,
            Commit('111', tree={'h': 'a'}),
            ref='heads/c',
        )
    other = prod.fork()
    repo = env['runbot_merge.repository'].create({
        'project_id': project.id,
        'name': prod.name,
        'required_statuses': 'legal/cla,ci/runbot',
        'fp_remote_target': fp_remote and other.name,
    })
    env['res.partner'].search([
        ('github_login', '=', config['role_reviewer']['user'])
    ]).write({
        'review_rights': [(0, 0, {'repository_id': repo.id, 'review': True})]
    })
    env['res.partner'].search([
        ('github_login', '=', config['role_self_reviewer']['user'])
    ]).write({
        'review_rights': [(0, 0, {'repository_id': repo.id, 'self_review': True})]
    })

    return project, prod, other
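
# Migration note (illustrative, not part of the diff text): this local helper
# is superseded by utils.make_basic, which returns only (prod, other); call
# sites below that still need the project recover it with
#
#     project = env['runbot_merge.project'].search([])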

def test_no_token(env, config, make_repo):
    """ if there's no token on the repo, nothing should break though it
    should log
    """
    # create project configured with remotes on the repo but no token
    proj, prod, _ = make_basic(env, config, make_repo, fp_token=False, fp_remote=True)
    prod, _ = make_basic(env, config, make_repo, fp_token=False, fp_remote=True)

    with prod:
        prod.make_commits(
@@ -111,8 +41,8 @@ def test_no_token(env, config, make_repo):
        "should not have created forward port"

def test_remove_token(env, config, make_repo):
    proj, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
    proj.fp_github_token = False
    prod, _ = make_basic(env, config, make_repo)
    env['runbot_merge.project'].search([]).fp_github_token = False

    with prod:
        prod.make_commits(
@@ -133,7 +63,7 @@ def test_remove_token(env, config, make_repo):
        "should not have created forward port"

def test_no_target(env, config, make_repo):
    proj, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=False)
    prod, _ = make_basic(env, config, make_repo, fp_remote=False)

    with prod:
        prod.make_commits(
@@ -154,7 +84,7 @@ def test_no_target(env, config, make_repo):
        "should not have created forward port"

def test_failed_staging(env, config, make_repo):
    proj, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
    prod, _ = make_basic(env, config, make_repo)

    reviewer = config['role_reviewer']['token']
    with prod:
@@ -181,7 +111,7 @@ def test_failed_staging(env, config, make_repo):
    with prod:
        prod.post_status(pr3_id.head, 'success', 'legal/cla')
        prod.post_status(pr3_id.head, 'success', 'ci/runbot')
        pr3.post_comment('%s r+' % proj.fp_github_name, reviewer)
        pr3.post_comment('hansen r+', reviewer)
    env.run_crons()

    prod.commit('staging.c')
@@ -192,19 +122,8 @@ def test_failed_staging(env, config, make_repo):
        prod.post_status('staging.c', 'failure', 'ci/runbot')
    env.run_crons()

    pr3_head = env['runbot_merge.commit'].search([
        ('sha', '=', pr3_id.head),
    ])
    assert len(pr3_head) == 1

    assert not pr3_id.batch_id, "check that the PR indeed has no batch anymore"
    assert not pr3_id.batch_ids.filtered(lambda b: b.active)

    assert len(env['runbot_merge.batch'].search([
        ('prs', 'in', pr3_id.id),
        '|', ('active', '=', True),
             ('active', '=', False),
    ])) == 2, "check that there do exist batches"
    pr3_head = env['runbot_merge.commit'].search([('sha', '=', pr3_id.head)])
    assert pr3_head

    # send a new status to the PR, as if somebody had rebuilt it or something
    with prod:
@@ -214,6 +133,8 @@ def test_failed_staging(env, config, make_repo):
    assert pr3_head.to_check, "check that the commit was updated as to process"
    env.run_crons()
    assert not pr3_head.to_check, "check that the commit was processed"
    assert pr3_id.state == 'ready'
    assert pr3_id.staging_id

class TestNotAllBranches:
    """ Check that forward-ports don't behave completely insanely when not all
@@ -265,7 +186,6 @@ class TestNotAllBranches:
            'github_prefix': 'hansen',
            'fp_github_token': config['github']['token'],
            'fp_github_name': 'herbert',
            'fp_github_email': 'hb@example.com',
            'branch_ids': [
                (0, 0, {'name': 'a', 'sequence': 2}),
                (0, 0, {'name': 'b', 'sequence': 1}),
@@ -318,7 +238,7 @@ class TestNotAllBranches:
        with a:
            a.post_status(pr2.head, 'success', 'ci/runbot')
            a.get_pr(pr2.number).post_comment(
                '%s r+' % project.fp_github_name,
                'hansen r+',
                config['role_reviewer']['token'])
        env.run_crons()
        assert pr1.staging_id
@@ -357,7 +277,7 @@ class TestNotAllBranches:
        with b:
            b.post_status(pr1.head, 'success', 'ci/runbot')
            b.get_pr(pr1.number).post_comment(
                '%s r+' % project.fp_github_name,
                'hansen r+',
                config['role_reviewer']['token'])
        env.run_crons()
        with a, b:
@@ -432,8 +352,9 @@ def test_new_intermediate_branch(env, config, make_repo):
    def validate(repo, commit):
        repo.post_status(commit, 'success', 'ci/runbot')
        repo.post_status(commit, 'success', 'legal/cla')
    project, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
    _, prod2, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
    prod, _ = make_basic(env, config, make_repo)
    prod2, _ = make_basic(env, config, make_repo)
    project = env['runbot_merge.project'].search([])
    assert len(project.repo_ids) == 2

    original_c_tree = prod.read_tree(prod.commit('c'))
@@ -580,7 +501,7 @@ def test_new_intermediate_branch(env, config, make_repo):
    with prod, prod2:
        for pr in fps.filtered(lambda p: p.target.name == 'c'):
            get_repo(pr).get_pr(pr.number).post_comment(
                '%s r+' % project.fp_github_name,
                'hansen r+',
                config['role_reviewer']['token'])
    assert all(p.state == 'merged' for p in PRs.browse(sources)),\
        "all sources should be merged"
@@ -610,7 +531,7 @@ def test_new_intermediate_branch(env, config, make_repo):
    }, "check that new got all the updates (should be in the same state as c really)"

def test_author_can_close_via_fwbot(env, config, make_repo):
    project, prod, xxx = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
    prod, _ = make_basic(env, config, make_repo)
    other_user = config['role_other']
    other_token = other_user['token']
    other = prod.fork(token=other_token)
@@ -627,7 +548,7 @@ def test_author_can_close_via_fwbot(env, config, make_repo):
        pr.open(other_token)
        prod.post_status(c, 'success', 'legal/cla')
        prod.post_status(c, 'success', 'ci/runbot')
        pr.post_comment('%s close' % project.fp_github_name, other_token)
        pr.post_comment('hansen close', other_token)
        pr.post_comment('hansen r+', config['role_reviewer']['token'])
    env.run_crons()
    assert pr.state == 'open'
@@ -647,26 +568,26 @@ def test_author_can_close_via_fwbot(env, config, make_repo):
        pr1.close(other_token)
    # user can close via fwbot
    with prod:
        pr1.post_comment('%s close' % project.fp_github_name, other_token)
        pr1.post_comment('hansen close', other_token)
    env.run_crons()
    assert pr1.state == 'closed'
    assert pr1_id.state == 'closed'

def test_skip_ci_all(env, config, make_repo):
    project, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
    prod, _ = make_basic(env, config, make_repo)

    with prod:
        prod.make_commits('a', Commit('x', tree={'x': '0'}), ref='heads/change')
        pr = prod.make_pr(target='a', head='change')
        prod.post_status(pr.head, 'success', 'legal/cla')
        prod.post_status(pr.head, 'success', 'ci/runbot')
        pr.post_comment('%s skipci' % project.fp_github_name, config['role_reviewer']['token'])
        pr.post_comment('hansen fw=skipci', config['role_reviewer']['token'])
        pr.post_comment('hansen r+', config['role_reviewer']['token'])
    env.run_crons()
    assert env['runbot_merge.pull_requests'].search([
        ('repository.name', '=', prod.name),
        ('number', '=', pr.number)
    ]).fw_policy == 'skipci'
    ]).batch_id.fw_policy == 'skipci'

    with prod:
        prod.post_status('staging.a', 'success', 'legal/cla')
@@ -685,7 +606,7 @@ def test_skip_ci_all(env, config, make_repo):
    assert pr2_id.source_id == pr0_id

def test_skip_ci_next(env, config, make_repo):
    project, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
    prod, _ = make_basic(env, config, make_repo)

    with prod:
        prod.make_commits('a', Commit('x', tree={'x': '0'}), ref='heads/change')
@@ -703,10 +624,10 @@ def test_skip_ci_next(env, config, make_repo):
    pr0_id, pr1_id = env['runbot_merge.pull_requests'].search([], order='number')
    with prod:
        prod.get_pr(pr1_id.number).post_comment(
            '%s skipci' % project.fp_github_name,
            config['role_user']['token']
            'hansen fw=skipci',
            config['role_reviewer']['token']
        )
    assert pr0_id.fw_policy == 'skipci'
    assert pr0_id.batch_id.fw_policy == 'skipci'
    env.run_crons()

    _, _, pr2_id = env['runbot_merge.pull_requests'].search([], order='number')
@@ -723,7 +644,8 @@ def test_retarget_after_freeze(env, config, make_repo, users):
    latter port. In that case the reinsertion task should just do nothing, and
    the retargeted PR should be forward-ported normally once merged.
    """
    project, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
    prod, _ = make_basic(env, config, make_repo)
    project = env['runbot_merge.project'].search([])
    with prod:
        [c] = prod.make_commits('b', Commit('thing', tree={'x': '1'}), ref='heads/mypr')
        pr = prod.make_pr(target='b', head='mypr')
@@ -790,13 +712,16 @@ def test_retarget_after_freeze(env, config, make_repo, users):
        prod.post_status('staging.bprime', 'success', 'legal/cla')
    env.run_crons()

    # #2 batch 6 (???)
    assert port_id.state == 'merged'

    new_pr_id = env['runbot_merge.pull_requests'].search([('state', 'not in', ('merged', 'closed'))])
    assert len(new_pr_id) == 1
    assert new_pr_id.parent_id == port_id
    assert new_pr_id.target == branch_c

def test_approve_draft(env, config, make_repo, users):
    _, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
    prod, _ = make_basic(env, config, make_repo)

    with prod:
        prod.make_commits('a', Commit('x', tree={'x': '0'}), ref='heads/change')
@@ -809,7 +734,7 @@ def test_approve_draft(env, config, make_repo, users):
    assert pr.comments == [
        (users['reviewer'], 'hansen r+'),
        seen(env, pr, users),
        (users['user'], f"I'm sorry, @{users['reviewer']}: draft PRs can not be approved."),
        (users['user'], f"@{users['reviewer']} draft PRs can not be approved."),
    ]

    with prod:
@@ -825,7 +750,8 @@ def test_freeze(env, config, make_repo, users):

    - should not forward-port the freeze PRs themselves
    """
    project, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
    prod, _ = make_basic(env, config, make_repo)
    project = env['runbot_merge.project'].search([])
    # branches here are "a" (older), "b", and "c" (master)
    with prod:
        [root, _] = prod.make_commits(
@@ -879,7 +805,7 @@ def test_missing_magic_ref(env, config, make_repo):
    Emulate this behaviour by updating the PR with a commit which lives in the
    repo but has no ref.
    """
    _, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
    prod, _ = make_basic(env, config, make_repo)
    a_head = prod.commit('refs/heads/a')
    with prod:
        [c] = prod.make_commits(a_head.id, Commit('x', tree={'x': '0'}), ref='heads/change')
@@ -926,3 +852,254 @@ def test_missing_magic_ref(env, config, make_repo):
    # what they are (rather than e.g. diff the HEAD it branch with the target)
    # as a result it doesn't forwardport our fake, we'd have to reset the PR's
    # branch for that to happen

def test_disable_branch_with_batches(env, config, make_repo, users):
    """We want to avoid losing pull requests, so when deactivating a branch,
    if there are *forward port* batches targeting that branch which have not
    been forward ported yet, port them over, as if their source had been
    merged after the branch was disabled (thus skipped over)
    """
    repo, fork = make_basic(env, config, make_repo, statuses="default")
    proj = env['runbot_merge.project'].search([])
    branch_b = env['runbot_merge.branch'].search([('name', '=', 'b')])
    assert branch_b

    # region repo2 creation & setup
    repo2 = make_repo('proj2')
    with repo2:
        [a, b, c] = repo2.make_commits(
            None,
            Commit("a", tree={"f": "a"}),
            Commit("b", tree={"g": "b"}),
            Commit("c", tree={"h": "c"}),
        )
        repo2.make_ref("heads/a", a)
        repo2.make_ref("heads/b", b)
        repo2.make_ref("heads/c", c)
    fork2 = repo2.fork()
    repo2_id = env['runbot_merge.repository'].create({
        "project_id": proj.id,
        "name": repo2.name,
        "required_statuses": "default",
        "fp_remote_target": fork2.name,
    })
    env['res.partner'].search([
        ('github_login', '=', config['role_reviewer']['user'])
    ]).write({
        'review_rights': [(0, 0, {'repository_id': repo2_id.id, 'review': True})]
    })
    env['res.partner'].search([
        ('github_login', '=', config['role_self_reviewer']['user'])
    ]).write({
        'review_rights': [(0, 0, {'repository_id': repo2_id.id, 'self_review': True})]
    })
    # endregion

    # region set up forward ported batch
    with repo, fork, repo2, fork2:
        fork.make_commits("a", Commit("x", tree={"x": "1"}), ref="heads/x")
        pr1_a = repo.make_pr(title="X", target="a", head=f"{fork.owner}:x")
        pr1_a.post_comment("hansen r+", config['role_reviewer']['token'])
        repo.post_status(pr1_a.head, "success")

        fork2.make_commits("a", Commit("x", tree={"x": "1"}), ref="heads/x")
        pr2_a = repo2.make_pr(title="X", target="a", head=f"{fork2.owner}:x")
        pr2_a.post_comment("hansen r+", config['role_reviewer']['token'])
        repo2.post_status(pr2_a.head, "success")
    # remove just pr2 from the forward ports (maybe?)
    pr2_a_id = to_pr(env, pr2_a)
    pr2_a_id.limit_id = branch_b.id
    env.run_crons()
    assert pr2_a_id.limit_id == branch_b
    # endregion


    with repo, repo2:
        repo.post_status('staging.a', 'success')
        repo2.post_status('staging.a', 'success')
    env.run_crons()

    PullRequests = env['runbot_merge.pull_requests']
    pr1_b_id = PullRequests.search([('parent_id', '=', to_pr(env, pr1_a).id)])
    pr2_b_id = PullRequests.search([('parent_id', '=', pr2_a_id.id)])
    assert pr1_b_id.parent_id
    assert pr1_b_id.state == 'opened'
    assert pr2_b_id.parent_id
    assert pr2_b_id.state == 'opened'

    b_id = proj.branch_ids.filtered(lambda b: b.name == 'b')
    proj.write({
        'branch_ids': [(1, b_id.id, {'active': False})]
    })
    env.run_crons()
    assert not b_id.active
    assert PullRequests.search_count([]) == 5, "should have ported pr1 but not pr2"
    assert PullRequests.search([], order="number DESC", limit=1).parent_id == pr1_b_id

    assert repo.get_pr(pr1_b_id.number).comments == [
        seen(env, repo.get_pr(pr1_b_id.number), users),
        (users['user'], "This PR targets b and is part of the forward-port chain. Further PRs will be created up to c.\n\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n"),
        (users['user'], "@{user} @{reviewer} the target branch 'b' has been disabled, you may want to close this PR.\n\nAs this was not its limit, it will automatically be forward ported to the next active branch.".format_map(users)),
    ]
    assert repo2.get_pr(pr2_b_id.number).comments == [
        seen(env, repo2.get_pr(pr2_b_id.number), users),
        (users['user'], """\
@{user} @{reviewer} this PR targets b and is the last of the forward-port chain.

To merge the full chain, use
> @hansen r+

More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
""".format_map(users)),
        (users['user'], "@{user} @{reviewer} the target branch 'b' has been disabled, you may want to close this PR.".format_map(users)),
    ]
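
    # Illustrative note (standard Odoo ORM command tuples, as used above and
    # below): in x2many `create`/`write` values,
    #
    #     (0, 0, vals)   creates a new record from vals
    #     (1, id, vals)  updates record `id` with vals (e.g. deactivating
    #                    branch b via {'active': False})
    #     (5, 0, 0)      removes all records from the relation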

def test_disable_multitudes(env, config, make_repo, users, setreviewers):
    """Ensure that deactivation ports can jump over other deactivated branches.
    """
    # region setup
    repo = make_repo("bob")
    project = env['runbot_merge.project'].create({
        "name": "bob",
        "github_token": config['github']['token'],
        "github_prefix": "hansen",
        "fp_github_token": config['github']['token'],
        "fp_github_name": "herbert",
        "branch_ids": [
            (0, 0, {'name': 'a', 'sequence': 90}),
            (0, 0, {'name': 'b', 'sequence': 80}),
            (0, 0, {'name': 'c', 'sequence': 70}),
            (0, 0, {'name': 'd', 'sequence': 60}),
        ],
        "repo_ids": [(0, 0, {
            'name': repo.name,
            'required_statuses': 'default',
            'fp_remote_target': repo.name,
        })],
    })
    setreviewers(project.repo_ids)

    with repo:
        [a, b, c, d] = repo.make_commits(
            None,
            Commit("a", tree={"branch": "a"}),
            Commit("b", tree={"branch": "b"}),
            Commit("c", tree={"branch": "c"}),
            Commit("d", tree={"branch": "d"}),
        )
        repo.make_ref("heads/a", a)
        repo.make_ref("heads/b", b)
        repo.make_ref("heads/c", c)
        repo.make_ref("heads/d", d)
    # endregion

    with repo:
        [a] = repo.make_commits("a", Commit("X", tree={"x": "1"}), ref="heads/x")
        pra = repo.make_pr(target="a", head="x")
        pra.post_comment("hansen r+", config['role_reviewer']['token'])
        repo.post_status(a, "success")
    env.run_crons()

    with repo:
        repo.post_status('staging.a', 'success')
    env.run_crons()

    pra_id = to_pr(env, pra)
    assert pra_id.state == 'merged'

    prb_id = env['runbot_merge.pull_requests'].search([('target.name', '=', 'b')])
    assert prb_id.parent_id == pra_id

    project.write({
        'branch_ids': [
            (1, b.id, {'active': False})
            for b in env['runbot_merge.branch'].search([('name', 'in', ['b', 'c'])])
        ]
    })
    env.run_crons()

    # should not have ported prb to the disabled branch c
    assert not env['runbot_merge.pull_requests'].search([('target.name', '=', 'c')])

    # should have ported prb to the active branch d
    prd_id = env['runbot_merge.pull_requests'].search([('target.name', '=', 'd')])
    assert prd_id
    assert prd_id.parent_id == prb_id

    prb = repo.get_pr(prb_id.number)
    assert prb.comments == [
        seen(env, prb, users),
        (users['user'], 'This PR targets b and is part of the forward-port chain. Further PRs will be created up to d.\n\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n'),
        (users['user'], """\
@{user} @{reviewer} the target branch 'b' has been disabled, you may want to close this PR.

As this was not its limit, it will automatically be forward ported to the next active branch.\
""".format_map(users)),
    ]
    prd = repo.get_pr(prd_id.number)
    assert prd.comments == [
        seen(env, prd, users),
        (users['user'], """\
@{user} @{reviewer} this PR targets d and is the last of the forward-port chain.

To merge the full chain, use
> @hansen r+

More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
""".format_map(users))
    ]

def test_maintain_batch_history(env, config, make_repo, users):
    """Batches which are part of a forward port sequence should not be deleted
    even if all their PRs are closed.

    Sadly in that case it's a bit difficult to maintain the integrity of the
    batch as each PR being closed (until the last one?) will be removed from
    the batch.
    """
    repo, fork = make_basic(env, config, make_repo, statuses="default")

    with repo, fork:
        fork.make_commits("a", Commit("x", tree={"x": "1"}), ref="heads/x")
        pr1_a = repo.make_pr(title="X", target="a", head=f"{fork.owner}:x")
        pr1_a.post_comment("hansen r+", config['role_reviewer']['token'])
        repo.post_status(pr1_a.head, "success")
    env.run_crons()

    pr1_a_id = to_pr(env, pr1_a)
    with repo:
        repo.post_status('staging.a', 'success')
    env.run_crons()

    pr1_b_id = env['runbot_merge.pull_requests'].search([('parent_id', '=', pr1_a_id.id)])
    with repo:
        repo.post_status(pr1_b_id.head, 'success')
    env.run_crons()

    pr1_c_id = env['runbot_merge.pull_requests'].search([('parent_id', '=', pr1_b_id.id)])

    # region check that all the batches are set up correctly
    assert pr1_a_id.batch_id
    assert pr1_b_id.batch_id
    assert pr1_c_id.batch_id
    assert pr1_c_id.batch_id.parent_id == pr1_b_id.batch_id
    assert pr1_b_id.batch_id.parent_id == pr1_a_id.batch_id
    b_batch = pr1_b_id.batch_id
    assert b_batch
    # endregion

    pr1_b = repo.get_pr(pr1_b_id.number)
    with repo:
        pr1_b.close()
    env.run_crons()
    assert pr1_b_id.state == 'closed'

    # region check that all the batches are *still* set up correctly
    assert b_batch.exists()
    assert pr1_a_id.batch_id == b_batch.parent_id
    assert pr1_b_id.batch_id == b_batch
    assert pr1_c_id.batch_id.parent_id == b_batch

    assert pr1_b_id in b_batch.all_prs, "the PR is still in the batch"
    assert pr1_b_id not in b_batch.prs, "the PR is not in the open/active batch PRs"
    # endregion
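
    # Illustrative sketch (assumes the all_prs/prs split asserted above): the
    # closed members of a batch can be recovered by recordset difference:
    #
    #     closed_prs = b_batch.all_prs - b_batch.prs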

@@ -49,22 +49,61 @@ class re_matches:
    def __eq__(self, text):
        return self._r.match(text)

    def __str__(self):
        return re.sub(r'\\(.)', r'\1', self._r.pattern)

    def __repr__(self):
        return repr(self._r.pattern)
        return repr(str(self))
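
    # Illustrative usage (not part of the change): `re_matches` compares equal
    # to any string its pattern matches, so it can stand in for exact strings
    # inside expected-comments assertions:
    #
    #     assert "hello world" == re_matches(r"hello \w+")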

def seen(env, pr, users):
    return users['user'], f'[Pull request status dashboard]({to_pr(env, pr).url}).'
    url = to_pr(env, pr).url
    return users['user'], f'[]({url})'
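
# Illustrative usage (as throughout the tests above): the mergebot's first
# comment on a PR is the dashboard link, so expected-comment lists start with
#
#     assert pr.comments[0] == seen(env, pr, users)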

def make_basic(env, config, make_repo, *, reponame='proj', project_name='myproject'):
    """ Creates a basic repo with 3 forking branches
def make_basic(
        env,
        config,
        make_repo,
        *,
        project_name='myproject',
        reponame='proj',
        statuses='legal/cla,ci/runbot',
        fp_token=True,
        fp_remote=True,
):
    """ Creates a project ``project_name`` **if none exists**, otherwise
    retrieves the existing one and adds a new repository and its fork.

    Repositories are set up with three forking branches:

    ::

        f = 0 -- 1 -- 2 -- 3 -- 4  : a
                      |
        g =           `-- 11 -- 22 : b
                           |
        h =                `-- 111 : c

    each branch just adds and modifies a file (resp. f, g and h) through the
    contents sequence a b c d e

    :param env: Environment, for odoo model interactions
    :param config: pytest project config thingie
    :param make_repo: repo maker function, normally the fixture, should be a
                      ``Callable[[str], Repo]``
    :param project_name: internal project name, can be used to recover the
                         project object afterward, matches exactly since it's
                         unique per odoo db (and thus test)
    :param reponame: the base name of the repository, for identification, for
                     concurrency reasons the actual repository name *will* be
                     different
    :param statuses: required statuses for the repository, stupidly default to
                     the old Odoo statuses, should be moved to ``default`` over
                     time for simplicity (unless the test specifically calls for
                     multiple statuses)
    :param fp_token: whether to set the ``fp_github_token`` on the project if
                     / when creating it
    :param fp_remote: whether to create a fork repo and set it as the
                      repository's ``fp_remote_target``
    """
|
||||
Projects = env['runbot_merge.project']
|
||||
project = Projects.search([('name', '=', project_name)])
|
||||
@ -73,9 +112,8 @@ def make_basic(env, config, make_repo, *, reponame='proj', project_name='myproje
|
||||
'name': project_name,
|
||||
'github_token': config['github']['token'],
|
||||
'github_prefix': 'hansen',
|
||||
'fp_github_token': config['github']['token'],
|
||||
'fp_github_token': fp_token and config['github']['token'],
|
||||
'fp_github_name': 'herbert',
|
||||
'fp_github_email': 'hb@example.com',
|
||||
'branch_ids': [
|
||||
(0, 0, {'name': 'a', 'sequence': 100}),
|
||||
(0, 0, {'name': 'b', 'sequence': 80}),
|
||||
@ -105,12 +143,12 @@ def make_basic(env, config, make_repo, *, reponame='proj', project_name='myproje
|
||||
Commit('111', tree={'h': 'a'}),
|
||||
ref='heads/c',
|
||||
)
|
||||
other = prod.fork()
|
||||
other = prod.fork() if fp_remote else None
|
||||
repo = env['runbot_merge.repository'].create({
|
||||
'project_id': project.id,
|
||||
'name': prod.name,
|
||||
'required_statuses': 'legal/cla,ci/runbot',
|
||||
'fp_remote_target': other.name,
|
||||
'required_statuses': statuses,
|
||||
'fp_remote_target': other.name if other else False,
|
||||
})
|
||||
env['res.partner'].search([
|
||||
('github_login', '=', config['role_reviewer']['user'])
|
||||
@ -145,3 +183,7 @@ def part_of(label, pr_id, *, separator='\n\n'):
|
||||
""" Adds the "part-of" pseudo-header in the footer.
|
||||
"""
|
||||
return f'{label}{separator}Part-of: {pr_id.display_name}'
|
||||
|
||||
def ensure_one(records):
|
||||
assert len(records) == 1
|
||||
return records
|
||||
|
@ -1,7 +1,7 @@
{
    'name': 'merge bot',
    'version': '1.9',
    'depends': ['contacts', 'website'],
    'version': '1.12',
    'depends': ['contacts', 'mail', 'website'],
    'data': [
        'security/security.xml',
        'security/ir.model.access.csv',
@ -12,6 +12,7 @@
        'data/runbot_merge.pull_requests.feedback.template.csv',
        'views/res_partner.xml',
        'views/runbot_merge_project.xml',
        'views/batch.xml',
        'views/mergebot.xml',
        'views/queues.xml',
        'views/configuration.xml',

57 runbot_merge/changelog/2023-12/commands.md Normal file
@ -0,0 +1,57 @@
CHG: complete rework of the commands system

# fun is dead: strict commands parsing

Historically the bots would apply whatever looked like a command and ignore the
rest. This led to people sending novels to the bot, then being surprised the bot
found a command in the mess.

The bots now ignore all lines which contain any non-command. Example:

> @robodoo r+ when green darling

Previously, the bot would apply the `r+` and ignore the rest. Now the bot will
ignore everything and reply with

> unknown command "when"

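To illustrate, here is a minimal sketch of line-level strict parsing (the
command set and function names are illustrative, not the mergebot's actual
parser):

```python
KNOWN_COMMANDS = {'r+', 'r-', 'retry', 'cancel'}

def parse_line(line: str) -> list[str] | None:
    """Returns the commands found on the line, or None if any token is
    not a known command (in which case the whole line is rejected)."""
    tokens = line.split()
    if tokens and all(t in KNOWN_COMMANDS for t in tokens):
        return tokens
    return None

assert parse_line("r+ when green darling") is None  # rejected wholesale
assert parse_line("r+ retry") == ['r+', 'retry']
```
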
# fwbot is dead

The mergebot (@robodoo) is now responsible for the old fwbot commands:

- close, ignore, up to, ... work as they ever did, just with robodoo
- `robodoo r+` now approves the parents if the current PR is a forward port
- a specific PR can be approved even in forward ports by providing its number
  e.g. `robodoo r=45328` will approve just PR 45328, if that is the PR the
  comment is being posted on or one of its parents
- the approval of forward ports won't skip over un-approvable PRs anymore
- the rights of the original author have been restricted slightly: they can
  only approve the direct descendants of merged PRs, so if one of the parents
  has been modified and is not merged yet, the original author can't approve,
  nor can they approve the modified PR, or a conflicting PR which has to get
  fixed (?)

# no more p=<number>

The old priorities command was a tangle of multiple concerns, not all of which
were always desired or applicable. These tangles have been split along their
various axes.

# listing

The new commands are:

- `default`, sets the staging priority back to the default
- `priority`, sets the staging priority to elevated, on staging these PRs are
  staged first, then the `normal` PRs are added
- `alone`, sets the staging priority to high, these PRs are staged before
  considering splits, and only `alone` PRs are staged together even if the batch
  is not full
- `fw=default`, processes forward ports normally
- `fw=skipci`, once the current PR has been merged creates all the forward ports
  without waiting for each to have valid statuses
- `fw=skipmerge`, immediately creates all forward ports even if the base pull
  request has not even been merged yet
- `skipchecks`, makes the entire batch (target PR and any linked PR) immediately
  ready, bypassing statuses and reviews
- `cancel`, cancels the staging on the target branch, if any
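Since the commands are orthogonal they can be combined in a single comment; a
hypothetical example (not taken from the source):

> @robodoo alone skipchecks fw=skipci

would stage the batch by itself, bypass statuses and reviews, and once merged
create the forward ports without waiting for each one's CI.
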
4 runbot_merge/changelog/2023-12/staging-priority.md Normal file
@ -0,0 +1,4 @@
ADD: projects now know how to prioritise new PRs over splits

While this likely has relatively low utility, we'll look at how it performs
during periods of high throughput.

14 runbot_merge/changelog/2023-12/staging-shutdown.md Normal file
@ -0,0 +1,14 @@
ADD: stagings can now be disabled on a per-project basis

Currently stopping stagings requires stopping the staging cron(s), which causes
several issues:

- the staging cron runs very often, so it can be difficult to find a window to
  deactivate it (as the cron runner acquires an exclusive lock on the cron)
- the staging cron is global, so it does not disable staging only on the
  problematic project (to say nothing of branch) but on all of them

The latter is not currently a huge issue as only one of the mergebot-tracked
projects is ultra active (spreadsheet activity is on the order of a few
single-PR stagings a day), but the former is really annoying when trying to
stop runaway broken stagings.

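A minimal sketch of how such a per-project switch can be consulted by the
staging cron (the field and method names here are assumptions, not the actual
implementation):

```python
def _cron_stage(env):
    for project in env['runbot_merge.project'].search([]):
        # hypothetical boolean field: when unset, skip the project
        # entirely without having to touch the cron itself
        if not project.staging_enabled:
            continue
        project._try_staging()
```
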
@ -176,7 +176,7 @@ def handle_pr(env, event):
    return env['runbot_merge.pull_requests'].search([
        ('repository', '=', repo.id),
        ('number', '=', pr['number']),
        ('target', '=', target.id),
        # ('target', '=', target.id),
    ])
    # edition difficulty: pr['base']['ref'] is the *new* target, the old one
    # is at event['change']['base']['ref'] (if the target changed), so edition
@ -288,7 +288,8 @@ def handle_pr(env, event):
    )

    pr_obj.write({
        'state': 'opened',
        'reviewed_by': False,
        'error': False,
        'head': pr['head']['sha'],
        'squash': pr['commits'] == 1,
    })
@ -327,11 +328,10 @@ def handle_pr(env, event):
        close=True,
        message=env.ref('runbot_merge.handle.pr.merged')._format(event=event),
    )

    if pr_obj.state == 'closed':
    elif pr_obj.closed:
        _logger.info('%s reopening %s', event['sender']['login'], pr_obj.display_name)
        pr_obj.write({
            'state': 'opened',
            'closed': False,
            # updating the head triggers a revalidation
            'head': pr['head']['sha'],
            'squash': pr['commits'] == 1,

@ -1,13 +1,26 @@
# -*- coding: utf-8 -*-
from __future__ import annotations

import base64
import collections
import colorsys
import hashlib
import io
import json
import math
import pathlib
from email.utils import formatdate
from itertools import chain, product
from typing import Tuple, cast, Mapping

import markdown
import markupsafe
import werkzeug.exceptions
import werkzeug.wrappers
from PIL import Image, ImageDraw, ImageFont

from odoo.http import Controller, route, request
from odoo.tools import file_open

LIMIT = 20
class MergebotDashboard(Controller):
@ -79,8 +92,8 @@ class MergebotDashboard:
            'entries': entries,
        })

    @route('/<org>/<repo>/pull/<int(min=1):pr>', auth='public', type='http', website=True, sitemap=False)
    def pr(self, org, repo, pr):
    @route('/<org>/<repo>/pull/<int(min=1):pr><any("", ".png"):png>', auth='public', type='http', website=True, sitemap=False)
    def pr(self, org, repo, pr, png):
        pr_id = request.env['runbot_merge.pull_requests'].sudo().search([
            ('repository.name', '=', f'{org}/{repo}'),
            ('number', '=', int(pr)),
@ -90,6 +103,9 @@ class MergebotDashboard:
        if not pr_id.repository.group_id <= request.env.user.groups_id:
            raise werkzeug.exceptions.NotFound()

        if png:
            return raster_render(pr_id)

        st = {}
        if pr_id.statuses:
            # normalise `statuses` to map to a dict
@ -102,3 +118,218 @@ class MergebotDashboard:
            'merged_head': json.loads(pr_id.commits_map).get(''),
            'statuses': st
        })

def raster_render(pr):
    default_headers = {
        'Content-Type': 'image/png',
        'Last-Modified': formatdate(),
        # - anyone can cache the image, so public
        # - crons run about every minute so that's how long a request is fresh
        # - if the mergebot can't be contacted, allow using the stale response (no must-revalidate)
        # - intermediate caches can recompress the PNG if they want (pillow is not a very good PNG generator)
        # - the response is mutable even during freshness, technically (as there
        #   is no guarantee the freshness window lines up with the cron, plus
        #   some events are not cron-based)
        # - maybe don't allow serving the stale image *while* revalidating?
        # - allow serving a stale image for a day if the server returns 500
        'Cache-Control': 'public, max-age=60, stale-if-error=86400',
    }
    if if_none_match := request.httprequest.headers.get('If-None-Match'):
        # just copy the existing value out if we received any
        default_headers['ETag'] = if_none_match

    # weak validation: check the latest modification date of all objects involved
    project, repos, branches, genealogy = pr.env.ref('runbot_merge.dashboard-pre')\
        ._run_action_code_multi({'pr': pr})

    # last-modified should be in RFC2822 format, which is what
    # email.utils.formatdate does (sadly takes a timestamp but...)
    last_modified = formatdate(max((
        o.write_date
        for o in chain(
            project,
            repos,
            branches,
            genealogy,
            genealogy.all_prs | pr,
        )
    )).timestamp())
    # The (304) response must not contain a body and must include the headers
    # that would have been sent in an equivalent 200 OK response
    headers = {**default_headers, 'Last-Modified': last_modified}
    if request.httprequest.headers.get('If-Modified-Since') == last_modified:
        return werkzeug.wrappers.Response(status=304, headers=headers)

    with file_open('web/static/fonts/google/Open_Sans/Open_Sans-Regular.ttf', 'rb') as f:
        font = ImageFont.truetype(f, size=16, layout_engine=0)
        f.seek(0)
        supfont = ImageFont.truetype(f, size=10, layout_engine=0)
    with file_open('web/static/fonts/google/Open_Sans/Open_Sans-Bold.ttf', 'rb') as f:
        bold = ImageFont.truetype(f, size=16, layout_engine=0)

    batches = pr.env.ref('runbot_merge.dashboard-prep')._run_action_code_multi({
        'pr': pr,
        'repos': repos,
        'branches': branches,
        'genealogy': genealogy,
    })

    # getbbox returns (left, top, right, bottom)

    rows = {b: font.getbbox(b.name)[3] for b in branches}
    rows[None] = max(bold.getbbox(r.name)[3] for r in repos)

    columns = {r: bold.getbbox(r.name)[2] for r in repos}
    columns[None] = max(font.getbbox(b.name)[2] for b in branches)

    etag = hashlib.sha256(f"(P){pr.id},{pr.repository.id},{pr.target.id}".encode())
    # repos and branches should be in a consistent order so can just hash that
    etag.update(''.join(f'(R){r.name}' for r in repos).encode())
    etag.update(''.join(f'(T){b.name},{b.active}' for b in branches).encode())
    # and product of deterministic iterations should be deterministic
    for r, b in product(repos, branches):
        ps = batches[r, b]
        etag.update(f"(B){ps['state']},{ps['detached']},{ps['active']}".encode())
        # technically label (state + blocked) does not actually impact image
        # render (though subcomponents of state do) however blocked is useful
        # to force an etag miss so keeping it
        # TODO: blocked includes draft & merge method, maybe should change looks?
        etag.update(''.join(
            f"(PS){p['label']},{p['closed']},{p['number']},{p['checked']},{p['reviewed']},{p['attached']}"
            for p in ps['prs']
        ).encode())

        w = h = 0
        for p in ps['prs']:
            _, _, ww, hh = font.getbbox(f" #{p['number']}")
            w += ww + supfont.getbbox(' '.join(filter(None, [
                'error' if p['pr'].error else '',
                '' if p['checked'] else 'unchecked',
                '' if p['reviewed'] else 'unreviewed',
                '' if p['attached'] else 'detached',
            ])))[2]
            h = max(hh, h)
        rows[b] = max(rows.get(b, 0), h)
        columns[r] = max(columns.get(r, 0), w)

    etag = headers['ETag'] = base64.b32encode(etag.digest()).decode()
    if if_none_match == etag:
        return werkzeug.wrappers.Response(status=304, headers=headers)

    pad_w, pad_h = 20, 5
    image_height = sum(rows.values()) + 2 * pad_h * len(rows)
    image_width = sum(columns.values()) + 2 * pad_w * len(columns)
    im = Image.new("RGB", (image_width+1, image_height+1), color='white')
    draw = ImageDraw.Draw(im, 'RGB')
    draw.font = font

    # for reasons of that being more convenient we store the bottom of the
    # current row, so getting the top edge requires subtracting h
    w = left = bottom = 0
    for b, r in product(chain([None], branches), chain([None], repos)):
        left += w

        opacity = 1.0 if b is None or b.active else 0.5
        background = BG['info'] if b == pr.target or r == pr.repository else BG[None]
        w, h = columns[r] + 2 * pad_w, rows[b] + 2 * pad_h

        if r is None:  # branch cell in row
            left = 0
            bottom += h
            if b:
                draw.rectangle(
                    (left + 1, bottom - h + 1, left+w - 1, bottom - 1),
                    background,
                )
                draw.text(
                    (left + pad_w, bottom - h + pad_h),
                    b.name,
                    fill=blend(TEXT, opacity, over=background),
                )
        elif b is None:  # repo cell in top row
            draw.rectangle((left + 1, bottom - h + 1, left+w - 1, bottom - 1), background)
            draw.text((left + pad_w, bottom - h + pad_h), r.name, fill=TEXT, font=bold)
        # draw the bottom-right edges of the cell
        draw.line([
            (left, bottom),  # bottom-left
            (left + w, bottom),  # bottom-right
            (left+w, bottom-h)  # top-right
        ], fill=(172, 176, 170))
        if r is None or b is None:
            continue

        ps = batches[r, b]

        bgcolor = BG[ps['state']]
        if pr in ps['pr_ids']:
            bgcolor = lighten(bgcolor, by=-0.05)
        background = blend(bgcolor, opacity, over=background)
        draw.rectangle((left + 1, bottom - h + 1, left+w - 1, bottom - 1), background)

        top = bottom - h + pad_h
        offset = left + pad_w
        for p in ps['prs']:
            label = f"#{p['number']}"
            foreground = blend((39, 110, 114), opacity, over=background)
            draw.text((offset, top), label, fill=foreground)
            x, _, ww, hh = font.getbbox(label)
            if p['closed']:
                draw.line([
                    (offset+x, top + hh - hh/3),
                    (offset+x+ww, top + hh - hh/3),
                ], fill=foreground)
            offset += ww
            if not p['attached']:
                # overdraw top border to mark the detachment
                draw.line([(left, bottom-h), (left+w, bottom-h)], fill=ERROR)
            for attribute in filter(None, [
                'error' if p['pr'].error else '',
                '' if p['checked'] else 'unchecked',
                '' if p['reviewed'] else 'unreviewed',
                '' if p['attached'] else 'detached',
            ]):
                label = f' {attribute}'
                draw.text((offset, top), label,
                          fill=blend(ERROR, opacity, over=background),
                          font=supfont)
                offset += supfont.getbbox(label)[2]
            offset += math.ceil(supfont.getlength(" "))

    buffer = io.BytesIO()
    im.save(buffer, 'png', optimize=True)
    return werkzeug.wrappers.Response(buffer.getvalue(), headers=headers)

Color = Tuple[int, int, int]
TEXT: Color = (102, 102, 102)
ERROR: Color = (220, 53, 69)
BG: Mapping[str | None, Color] = collections.defaultdict(lambda: (255, 255, 255), {
    'info': (217, 237, 247),
    'success': (223, 240, 216),
    'warning': (252, 248, 227),
    'danger': (242, 222, 222),
})
def blend_single(c: int, over: int, opacity: float) -> int:
    return round(over * (1 - opacity) + c * opacity)

def blend(color: Color, opacity: float, *, over: Color = (255, 255, 255)) -> Color:
    assert 0.0 <= opacity <= 1.0
    return (
        blend_single(color[0], over[0], opacity),
        blend_single(color[1], over[1], opacity),
        blend_single(color[2], over[2], opacity),
    )

def lighten(color: Color, *, by: float) -> Color:
    # colorsys uses values in the range [0, 1] rather than pillow/CSS-style [0, 255]
    r, g, b = tuple(c / 255 for c in color)
    hue, lightness, saturation = colorsys.rgb_to_hls(r, g, b)

    # by% of the way between value and 1.0
    if by >= 0: lightness += (1.0 - lightness) * by
    # -by% of the way between 0 and value
    else: lightness *= (1.0 + by)

    return cast(Color, tuple(
        round(c * 255)
        for c in colorsys.hls_to_rgb(hue, lightness, saturation)
    ))

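# usage sketch (illustrative, not part of the module): compose a batch cell
# colour the way raster_render does, nudging a palette colour then fading it
# over the white page background
if __name__ == '__main__':
    base = BG['success']            # (223, 240, 216)
    cell = lighten(base, by=-0.05)  # slightly darker, as for the current PR's cell
    print(blend(cell, 0.5))         # rendered at half opacity over white
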
@ -40,7 +40,7 @@ runbot_merge.command.approve.failure,@{user} you may want to rebuild or fix this

user: github login of comment sender
pr: pr object to which the command was sent"
runbot_merge.command.unapprove.p0,"PR priority reset to 1, as pull requests with priority 0 ignore review state.","Responds to r- of pr in p=0.
runbot_merge.command.unapprove.p0,"Skipchecks removed due to r-.","Responds to r- of pr in skipchecks.

user: github login of comment sender
pr: pr object to which the command was sent"
@ -53,7 +53,7 @@ runbot_merge.failure.approved,{pr.ping}{status!r} failed on this reviewed PR.,"N

pr: pull request in question
status: failed status"
runbot_merge.pr.created,[Pull request status dashboard]({pr.url}).,"Initial comment on PR creation.
runbot_merge.pr.created,[]({pr.url}),"Initial comment on PR creation.

pr: created pr"
runbot_merge.pr.linked.not_ready,{pr.ping}linked pull request(s) {siblings} not ready. Linked PRs are not staged until all of them are ready.,"Comment when a PR is ready (approved & validated) but it is linked to other PRs which are not.
@ -107,16 +107,13 @@ pr: PR where update followup conflict happened
previous: parent PR which triggered the followup
stdout: markdown-formatted stdout of git, if any
stderr: markdown-formatted stderr of git, if any"
runbot_merge.forwardport.update.detached,{pr.ping}this PR was modified / updated and has become a normal PR. It should be merged the normal way (via @{pr.repository.project_id.github_prefix}),"Comment when a forwardport PR gets updated, documents that the PR now needs to be merged the “normal” way.
runbot_merge.forwardport.update.detached,{pr.ping}this PR was modified / updated and has become a normal PR. It must be merged directly.,"Comment when a forwardport PR gets updated, documents that the PR now needs to be merged the “normal” way.

pr: the pr in question "
runbot_merge.forwardport.update.parent,{pr.ping}child PR {child.display_name} was modified / updated and has become a normal PR. This PR (and any of its parents) will need to be merged independently as approvals won't cross.,"Sent to an open PR when its direct child has been detached.

pr: the pr
child: its detached child"
runbot_merge.forwardport.reopen.detached,{pr.ping}this PR was closed then reopened. It should be merged the normal way (via @{pr.repository.project_id.github_prefix}),"Comment when a forwardport PR gets closed then reopened, documents that the PR is now in a detached state.

pr: the pr in question"
runbot_merge.forwardport.ci.failed,{pr.ping}{ci} failed on this forward-port PR,"Comment when CI fails on a forward-port PR (which thus won't port any further, for now).

pr: the pr in question
@ -128,7 +125,7 @@ linked: the linked PR with a different next target
next: next target for the current pr
other: next target for the other pr"
runbot_merge.forwardport.failure.conflict,"{pr.ping}the next pull request ({new.display_name}) is in conflict. You can merge the chain up to here by saying
> @{pr.repository.project_id.fp_github_name} r+
> @{pr.repository.project_id.github_prefix} r+
{footer}","Comment when a forward port was created but is in conflict, warns of that & gives instructions for current PR.

pr: the pr which was just forward ported
@ -163,14 +160,14 @@ footer: some footer text"
runbot_merge.forwardport.final,"{pr.ping}this PR targets {pr.target.name} and is the last of the forward-port chain{containing}
{ancestors}
To merge the full chain, use
> @{pr.repository.project_id.fp_github_name} r+
> @{pr.repository.project_id.github_prefix} r+
{footer}","Comment when a forward port was created and is the last of a sequence (target the limit branch).

pr: the new forward port
containing: label changing depending whether there are ancestors to merge
ancestors: markdown formatted list of parent PRs which can be approved as part of the chain
footer: a footer"
runbot_merge.forwardport.intermediate,"This PR targets {pr.target.name} and is part of the forward-port chain. Further PRs will be created up to {pr.limit_id.name}.
runbot_merge.forwardport.intermediate,"This PR targets {pr.target.name} and is part of the forward-port chain. Further PRs will be created up to {pr.limit_pretty}.
{footer}","Comment when a forward port was successfully created but is not the last of the line.

pr: the new forward port

11 runbot_merge/migrations/15.0.1.10/pre-migration.py Normal file
@ -0,0 +1,11 @@
""" Migration for the unified commands parser, fp_github fields moved from
|
||||
forwardport to mergebot (one of them is removed but we might not care)
|
||||
"""
|
||||
def migrate(cr, version):
|
||||
cr.execute("""
|
||||
UPDATE ir_model_data
|
||||
SET module = 'runbot_merge'
|
||||
WHERE module = 'forwardport'
|
||||
AND model = 'ir.model.fields'
|
||||
AND name in ('fp_github_token', 'fp_github_name')
|
||||
""")
|
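# a sanity check one might run after this migration (illustrative, not part
# of the script); it mirrors the WHERE clause above:
#
#   SELECT module, count(*) FROM ir_model_data
#   WHERE model = 'ir.model.fields'
#     AND name IN ('fp_github_token', 'fp_github_name')
#   GROUP BY module;
#
# both entries should now be reported under runbot_merge only
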
124 runbot_merge/migrations/15.0.1.11/pre-migration.py Normal file
@ -0,0 +1,124 @@
def move_fields(cr, *names):
    cr.execute("""
        UPDATE ir_model_data
        SET module = 'runbot_merge'
        WHERE module = 'forwardport'
        AND model = 'runbot_merge_pull_requests'
        AND name IN %s
    """, [names])

def migrate(cr, version):
    # cleanup some old crap
    cr.execute("""
        ALTER TABLE runbot_merge_project_freeze
        DROP COLUMN IF EXISTS release_label,
        DROP COLUMN IF EXISTS bump_label
    """)

    # fw constraint moved to mergebot, alongside all the fields it constrains
    cr.execute("""
        UPDATE ir_model_data
        SET module = 'runbot_merge'
        WHERE module = 'forwardport'
        AND model = 'ir.model.constraint'
        AND name = 'constraint_runbot_merge_pull_requests_fw_constraint'
    """)
    move_fields(
        cr, 'merge_date', 'refname',
        'limit_id', 'source_id', 'parent_id', 'root_id', 'forwardport_ids',
        'detach_reason', 'fw_policy')

    # view depends on pr.state, which prevents changing the state column's type
    # we can just drop the view and it'll be recreated by the db update
    cr.execute("DROP VIEW runbot_merge_freeze_labels")
    # convert a few data types
    cr.execute("""
        CREATE TYPE runbot_merge_pull_requests_priority_type
            AS ENUM ('default', 'priority', 'alone');

        CREATE TYPE runbot_merge_pull_requests_state_type
            AS ENUM ('opened', 'closed', 'validated', 'approved', 'ready', 'merged', 'error');

        CREATE TYPE runbot_merge_pull_requests_merge_method_type
            AS ENUM ('merge', 'rebase-merge', 'rebase-ff', 'squash');

        CREATE TYPE runbot_merge_pull_requests_status_type
            AS ENUM ('pending', 'failure', 'success');


        ALTER TABLE runbot_merge_pull_requests
            ALTER COLUMN priority
                TYPE runbot_merge_pull_requests_priority_type
                USING CASE WHEN priority = 0
                    THEN 'alone'
                    ELSE 'default'
                END::runbot_merge_pull_requests_priority_type,
            ALTER COLUMN state
                TYPE runbot_merge_pull_requests_state_type
                USING state::runbot_merge_pull_requests_state_type,
            ALTER COLUMN merge_method
                TYPE runbot_merge_pull_requests_merge_method_type
                USING merge_method::runbot_merge_pull_requests_merge_method_type;
    """)

    cr.execute("""
        ALTER TABLE runbot_merge_pull_requests
        ADD COLUMN closed boolean not null default 'false',
        ADD COLUMN error boolean not null default 'false',
        ADD COLUMN skipchecks boolean not null default 'false',
        ADD COLUMN cancel_staging boolean not null default 'false',

        ADD COLUMN statuses text not null default '{}',
        ADD COLUMN statuses_full text not null default '{}',
        ADD COLUMN status runbot_merge_pull_requests_status_type not null default 'pending'
    """)
    # first pass: update all the new unconditional (or simple) fields
    cr.execute("""
        UPDATE runbot_merge_pull_requests p
        SET closed = state = 'closed',
            error = state = 'error',
            skipchecks = priority = 'alone',
            cancel_staging = priority = 'alone',
            fw_policy = CASE fw_policy WHEN 'ci' THEN 'default' ELSE fw_policy END,
            reviewed_by = CASE state
                -- old version did not reset reviewer on PR update
                WHEN 'opened' THEN NULL
                WHEN 'validated' THEN NULL
                -- if a PR predates the reviewed_by field, assign odoobot as reviewer
                WHEN 'merged' THEN coalesce(reviewed_by, 2)
                ELSE reviewed_by
            END,
            status = CASE state
                WHEN 'validated' THEN 'success'
                WHEN 'ready' THEN 'success'
                WHEN 'merged' THEN 'success'
                ELSE 'pending'
            END::runbot_merge_pull_requests_status_type
    """)

    # the rest only gets updated if we have a matching commit which is not
    # always the case
    cr.execute("""
        CREATE TEMPORARY TABLE parents ( id INTEGER not null, overrides jsonb not null );
        WITH RECURSIVE parent_chain AS (
            SELECT id, overrides::jsonb
            FROM runbot_merge_pull_requests
            WHERE parent_id IS NULL
            UNION ALL
            SELECT p.id, coalesce(pc.overrides || p.overrides::jsonb, pc.overrides, p.overrides::jsonb) as overrides
            FROM runbot_merge_pull_requests p
            JOIN parent_chain pc ON p.parent_id = pc.id
        )
        INSERT INTO parents SELECT * FROM parent_chain;
        CREATE INDEX ON parents (id);

        UPDATE runbot_merge_pull_requests p
        SET statuses = jsonb_pretty(c.statuses::jsonb)::text,
            statuses_full = jsonb_pretty(
                c.statuses::jsonb
                || coalesce((select overrides from parents where id = p.parent_id), '{}')
                || overrides::jsonb
            )::text
        FROM runbot_merge_commit c
        WHERE p.head = c.sha
    """)

833 runbot_merge/migrations/15.0.1.12/pre-migration.py Normal file
@ -0,0 +1,833 @@
"""This is definitely the giantest of fucks as pretty much the entire model was
|
||||
reworked
|
||||
"""
|
||||
import dataclasses
|
||||
import logging
|
||||
from collections import defaultdict
|
||||
from itertools import chain
|
||||
from typing import TypeVar, Any
|
||||
|
||||
from psycopg2.extras import execute_batch, execute_values
|
||||
from psycopg2.sql import SQL
|
||||
|
||||
logger = logging.getLogger("odoo.modules.migration.runbot_merge.15.0.1.12")
|
||||
|
||||
def cleanup(cr):
|
||||
"""There seems to be some *pretty* weird database state having crept
|
||||
"""
|
||||
# Until 2021 (not sure why exactly) a bunch of batches were created with no
|
||||
# PRs, some staged and some not.
|
||||
logger.info("Delete batches without PRs...")
|
||||
cr.execute("""
|
||||
DELETE FROM runbot_merge_batch
|
||||
WHERE id IN (
|
||||
SELECT b.id
|
||||
FROM runbot_merge_batch b
|
||||
LEFT JOIN runbot_merge_batch_runbot_merge_pull_requests_rel r ON (b.id = r.runbot_merge_batch_id)
|
||||
WHERE r.runbot_merge_batch_id IS NULL
|
||||
)
|
||||
""")
|
||||
# some of the batches above were the only ones of their stagings
|
||||
logger.info("Delete stagings without batches...")
|
||||
cr.execute("""
|
||||
DELETE FROM runbot_merge_stagings
|
||||
WHERE id IN (
|
||||
SELECT s.id
|
||||
FROM runbot_merge_stagings s
|
||||
LEFT JOIN runbot_merge_batch b ON (s.id = b.staging_id)
|
||||
WHERE b.id IS NULL
|
||||
)
|
||||
""")
|
||||
|
||||
# check PRs whose source has a source
|
||||
cr.execute("""
|
||||
SELECT
|
||||
p.id AS id,
|
||||
s.id AS source_id,
|
||||
r.name || '#' || p.number AS pr,
|
||||
pr.name || '#' || pp.number AS parent,
|
||||
sr.name || '#' || s.number AS source
|
||||
|
||||
FROM runbot_merge_pull_requests p
|
||||
JOIN runbot_merge_repository r ON (r.id = p.repository)
|
||||
|
||||
JOIN runbot_merge_pull_requests pp ON (pp.id = p.source_id)
|
||||
JOIN runbot_merge_repository pr ON (pr.id = pp.repository)
|
||||
|
||||
JOIN runbot_merge_pull_requests s ON (s.id = pp.source_id)
|
||||
JOIN runbot_merge_repository sr ON (sr.id = s.repository)
|
||||
ORDER BY p.id;
|
||||
""")
|
||||
for pid, ssid, _, _, _ in cr.fetchall():
|
||||
cr.execute("UPDATE runbot_merge_pull_requests SET source_id = %s WHERE id = %s", [ssid, pid])
|
||||
|
||||
def hlink(url):
|
||||
"""A terminal hlink starts with OSC8;{params};{link}ST and ends with the
|
||||
sequence with no params or link
|
||||
"""
|
||||
return f'\x9d8;;{url}\x9c'
|
||||
|
||||
def link(label, url):
|
||||
return f"{hlink(url)}{label}{hlink('')}"
|
||||
|
||||
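
# usage sketch (illustrative): on terminals supporting OSC 8 hyperlinks the
# label renders as clickable text pointing at the url, e.g.
#   print(link("odoo#12345", "https://mergebot.odoo.com/odoo/pull/12345"))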

def batch_freezes(cr):
    """Old freezes were created batch-less but marked as merged, to make things
    more consistent and avoid losing them for e.g. synthetic git histories,
    associate them with synthetic successful stagings
    """
    cr.execute("SELECT id FROM res_users WHERE login = 'moc@odoo.com'")
    [uid] = cr.fetchone()
    cr.execute("""
        SELECT
            array_agg(DISTINCT p.target) AS target,
            array_agg(DISTINCT p.merge_date) AS merge_date,
            json_object_agg(r.id, json_build_object(
                'id', p.id,
                'head', p.commits_map::json->''
            )) AS prs

        FROM runbot_merge_pull_requests p
        JOIN runbot_merge_repository r ON (r.id = p.repository)
        JOIN runbot_merge_branch t ON (t.id = p.target)

        LEFT JOIN runbot_merge_batch_runbot_merge_pull_requests_rel bp ON (runbot_merge_pull_requests_id = p.id)
        LEFT JOIN runbot_merge_batch b ON (runbot_merge_batch_id = b.id)
        LEFT JOIN runbot_merge_stagings s ON (b.staging_id = s.id)

        WHERE p.state = 'merged'
          AND runbot_merge_pull_requests_id IS NULL
          AND p.id != 1

        GROUP BY label;
    """)
    freeze_batches = [
        (target, merge_date, {int(r): p for r, p in prs.items()})
        for [target], [merge_date], prs in cr._obj
    ]

    stagings = []
    for t, m, prs in freeze_batches:
        # fetch the preceding successful staging on master
        cr.execute("""
            SELECT id
            FROM runbot_merge_stagings
            -- target 1 = master (so we want the last successful master staging before the freeze)
            WHERE state = 'success' AND staged_at < %s AND target = 1
            ORDER BY staged_at DESC
            LIMIT 1
        """, [m])
        cr.execute("""
            SELECT repository_id, commit_id
            FROM runbot_merge_stagings_commits
            WHERE staging_id = %s
        """, cr.fetchone())
        commits = dict(cr._obj)

        cr.execute("""
            INSERT INTO runbot_merge_stagings
                (state, active, create_uid, write_uid, target, staged_at, create_date, write_date)
            VALUES ('success', false, %s, %s, %s, %s, %s, %s)
            RETURNING id
        """, [uid, uid, t, m, m, m])
        [[staging]] = cr.fetchall()
        stagings.append(staging)

        for repo, pr in prs.items():
            if repo not in commits:
                cr.execute("""
                    INSERT INTO runbot_merge_commit (sha) VALUES (%s)
                    ON CONFLICT (sha) DO UPDATE
                        SET to_check = runbot_merge_commit.to_check
                    RETURNING id
                """, [pr['head']])
                [cid] = cr.fetchone()
                commits[repo] = cid

        for repo, commit in commits.items():
            cr.execute("""
                INSERT INTO runbot_merge_stagings_commits
                    (staging_id, repository_id, commit_id)
                VALUES (%s, %s, %s)
            """, [staging, repo, commit])
            cr.execute("""
                INSERT INTO runbot_merge_stagings_heads
                    (staging_id, repository_id, commit_id)
                VALUES (%s, %s, %s)
            """, [staging, repo, commit])

    batches = []
    for staging, (_, date, _) in zip(stagings, freeze_batches):
        cr.execute("""
            INSERT INTO runbot_merge_batch
                (create_uid, write_uid, staging_id, create_date, write_date)
            VALUES (%s, %s, %s, %s, %s)
            RETURNING id
        """, [uid, uid, staging, date, date])
        [[batch]] = cr.fetchall()
        batches.append(batch)

    for batch, (_, _, prs) in zip(batches, freeze_batches):
        for pr in prs.values():
            cr.execute("""
                INSERT INTO runbot_merge_batch_runbot_merge_pull_requests_rel
                    (runbot_merge_batch_id, runbot_merge_pull_requests_id)
                VALUES (%s, %s)
            """, [batch, pr['id']])

def migrate(cr, version):
    cr.execute("select from forwardport_batches")
    assert not cr.rowcount, f"can't migrate the mergebot with enqueued forward ports (found {cr.rowcount})"
    # avoid SQL taking absolutely ungodly amounts of time
    cr.execute("SET statement_timeout = '60s'")
    # will be recreated & computed on the fly
    cr.execute("""
        ALTER TABLE runbot_merge_batch
        DROP COLUMN target,
        DROP COLUMN active
    """)

    cleanup(cr)
    batch_freezes(cr)

    cr.execute("""
        SELECT
            source_name,
            array_agg(json_build_array(gs.target, gs.prs) order by gs.seq desc)
        FROM (
            SELECT
                rr.name || '#' || source.number as source_name,
                t.sequence as seq,
                t.name as target,
                array_agg(json_build_array(r.name || '#' || p.number, p.state)) as prs

            FROM runbot_merge_pull_requests p
            JOIN runbot_merge_repository r ON (r.id = p.repository)
            JOIN runbot_merge_branch t ON (t.id = p.target)

            JOIN runbot_merge_pull_requests source ON (source.id = p.source_id)
            JOIN runbot_merge_repository rr ON (rr.id = source.repository)

            GROUP BY source.id, rr.id, t.id
            HAVING count(*) FILTER (WHERE p.state = 'merged') > 1
        ) gs
        GROUP BY source_name
    """)
    if cr.rowcount:
        msg = "Found inconsistent batches, which will confuse later chaining\n\n"
        for source, per_target in cr._obj:
            msg += f"source {source}\n"
            for target, prs in per_target:
                msg += "\t{} {}\n".format(
                    target,
                    ", ".join(f'{p} ({s})' for p, s in prs),
                )
        raise Exception(msg)

    logger.info("add batch columns...")
    cr.execute("""
        CREATE TYPE runbot_merge_batch_priority
            AS ENUM ('default', 'priority', 'alone');

        ALTER TABLE runbot_merge_batch
            -- backfilled from staging
            ADD COLUMN merge_date timestamp,
            -- backfilled from PRs
            ADD COLUMN priority runbot_merge_batch_priority NOT NULL DEFAULT 'default',
            ADD COLUMN skipchecks boolean NOT NULL DEFAULT false,
            ADD COLUMN cancel_staging boolean NOT NULL DEFAULT false,
            ADD COLUMN fw_policy varchar NOT NULL DEFAULT 'default'
        ;
    """)
    # batches not linked to stagings are likely to be useless
    logger.info("add batch/staging join table...")
    cr.execute("""
        CREATE TABLE runbot_merge_staging_batch (
            id serial PRIMARY KEY,
            runbot_merge_batch_id integer NOT NULL REFERENCES runbot_merge_batch(id) ON DELETE CASCADE,
            runbot_merge_stagings_id integer NOT NULL REFERENCES runbot_merge_stagings(id) ON DELETE CASCADE
        );
        CREATE UNIQUE INDEX runbot_merge_staging_batch_idx ON runbot_merge_staging_batch
            (runbot_merge_stagings_id, runbot_merge_batch_id);
        CREATE INDEX runbot_merge_staging_batch_rev ON runbot_merge_staging_batch
            (runbot_merge_batch_id) INCLUDE (runbot_merge_stagings_id);
    """)
    # old 'bot creates a new batch at staging time, associated with that
    # specific staging, the way to recoup them (to the best of our ability) is
    # to assume a new style batch is a set of PRs, so if we group batches by prs
    # we get more or less the set of relevant batches / stagings
    logger.info("collect batches...")
    clusters, to_batch = collate_real_batches(cr)

    logger.info("collate batches...")
    to_delete = []
    batch_staging_links = []
    to_rejoin = []
    for cluster in clusters.clusters:
        first = cluster.merged_batch or min(cluster.batches)
        to_delete.extend(cluster.batches - {first})
        # link all the PRs back to that batch
        to_rejoin.append((first, list(cluster.prs)))
        # link `first` to `staging`, ordering insertions by `batch` in order
        # to conserve batching order
        batch_staging_links.extend(
            (batch, first, staging)
            for batch, staging in cluster.stagings
        )

    logger.info("link batches to stagings...")
    # sort (unique_batch, staging) by initial batch so that we create the new
    # bits in the correct order hopefully
    batch_staging_links.sort()
    execute_values(
        cr._obj,
        "INSERT INTO runbot_merge_staging_batch (runbot_merge_batch_id, runbot_merge_stagings_id) VALUES %s",
        ((b, s) for _, b, s in batch_staging_links),
        page_size=1000,
    )

    logger.info("detach PRs from \"active\" batches...")
    # there are non-deactivated batches floating around, which are not linked
    # to stagings, they seem linked to updates (forward-ported PRs getting
    # updated), but not exclusively
    cr.execute("UPDATE runbot_merge_pull_requests SET batch_id = NULL WHERE batch_id IS NOT NULL")
    # drop constraint because pg checks it even though we've set all the active batches to null
    cr.execute("ALTER TABLE runbot_merge_pull_requests DROP CONSTRAINT runbot_merge_pull_requests_batch_id_fkey")

    while to_delete:
        ds, to_delete = to_delete[:10000], to_delete[10000:]
        logger.info("delete %d leftover batches", len(ds))
        cr.execute("DELETE FROM runbot_merge_batch WHERE id = any(%s)", [ds])

    logger.info("delete staging column...")
    cr.execute("ALTER TABLE runbot_merge_batch DROP COLUMN staging_id;")

    logger.info("relink PRs...")
    cr.execute("DROP TABLE runbot_merge_batch_runbot_merge_pull_requests_rel")
    execute_batch(
        cr._obj,
        "UPDATE runbot_merge_pull_requests SET batch_id = %s WHERE id = any(%s)",
        to_rejoin,
        page_size=1000,
    )

    # at this point all the surviving batches should have associated PRs
    cr.execute("""
        SELECT b.id
        FROM runbot_merge_batch b
        LEFT JOIN runbot_merge_pull_requests p ON p.batch_id = b.id
        WHERE p IS NULL;
    """)
    if cr.rowcount:
        logger.error(
            "All batches should have at least one PR, found %d without",
            cr.rowcount,
        )

    # the relinked batches are those from stagings, but that means merged PRs
    # (or at least PRs we tried to merge), we also need batches for non-closed
    # non-merged PRs
    logger.info("collect unbatched PRs...")
    cr.execute("""
        SELECT
            CASE
                WHEN label SIMILAR TO '%%:patch-[[:digit:]]+'
                    THEN id::text
                ELSE label
            END as label_but_not,
            array_agg(id),
            array_agg(distinct target)
        FROM runbot_merge_pull_requests
        WHERE batch_id IS NULL AND id != all(%s)
        GROUP BY label_but_not
    """, [[pid for b in to_batch for pid in b]])
    for _label, ids, targets in cr._obj:
        # a few batches are nonsensical e.g. multiple PRs on different
        # targets from the same branch or mix of master upgrade and stable
        # branch community, split them out
        if len(targets) > 1:
            to_batch.extend([id] for id in ids)
        else:
            to_batch.append(ids)

    logger.info("create %d new batches for unbatched prs...", len(to_batch))
    cr.execute(
        SQL("INSERT INTO runbot_merge_batch VALUES {} RETURNING id").format(
            SQL(", ").join([SQL("(DEFAULT)")]*len(to_batch))))
    logger.info("link unbatched PRs to batches...")
    execute_batch(
        cr._obj,
        "UPDATE runbot_merge_pull_requests SET batch_id = %s WHERE id = any(%s)",
        [(batch_id, ids) for ids, [batch_id] in zip(to_batch, cr.fetchall())],
        page_size=1000,
    )

    cr.execute("SELECT state, count(*) FROM runbot_merge_pull_requests WHERE batch_id IS NULL GROUP BY state")
    if cr.rowcount:
        prs = cr.fetchall()
        logger.error(
            "Found %d PRs without a batch:%s",
            sum(c for _, c in prs),
            "".join(
                f"\n\t- {c} {p!r} PRs"
                for p, c in prs
            ),
        )

    logger.info("move pr data to batches...")
    cr.execute("""
        UPDATE runbot_merge_batch b
        SET merge_date = v.merge_date,
            priority = v.p::varchar::runbot_merge_batch_priority,
            skipchecks = v.skipchecks,
            cancel_staging = v.cancel_staging,
            fw_policy = case when v.skipci
                THEN 'skipci'
                ELSE 'default'
            END
        FROM (
            SELECT
                batch_id as id,
                max(priority) as p,
                min(merge_date) as merge_date,
                -- added to PRs in 1.11 so can be aggregated & copied over
                bool_or(skipchecks) as skipchecks,
                bool_or(cancel_staging) as cancel_staging,
                bool_or(fw_policy = 'skipci') as skipci
            FROM runbot_merge_pull_requests
            GROUP BY batch_id
        ) v
        WHERE b.id = v.id
    """)

    logger.info("restore batch constraint...")
    cr.execute("""
        ALTER TABLE runbot_merge_pull_requests
            ADD CONSTRAINT runbot_merge_pull_requests_batch_id_fkey
            FOREIGN KEY (batch_id)
            REFERENCES runbot_merge_batch (id)
    """)

    # remove xid for x_prs (not sure why it exists)
    cr.execute("""
        DELETE FROM ir_model_data
        WHERE module = 'forwardport'
        AND name = 'field_forwardport_batches__x_prs'
    """)
    # update (x_)prs to match the updated field type(s)
    cr.execute("""
        UPDATE ir_model_fields
        SET ttype = 'one2many',
            relation = 'runbot_merge.pull_requests',
            relation_field = 'batch_id'
        WHERE model_id = 445 AND name = 'prs';

        UPDATE ir_model_fields
        SET ttype = 'one2many'
        WHERE model_id = 448 AND name = 'x_prs';
    """)

    logger.info("generate batch parenting...")
    cr.execute("SELECT id, project_id, name FROM runbot_merge_branch ORDER BY project_id, sequence, name")
    # branch_id -> str
    branch_names = {}
    # branch_id -> project_id
    projects = {}
    # project_id -> list[branch_id]
    branches_for_project = {}
    for bid, pid, name in cr._obj:
        branch_names[bid] = name
        projects[bid] = pid
        branches_for_project.setdefault(pid, []).append(bid)
    cr.execute("""
        SELECT batch_id,
               array_agg(distinct target),
               array_agg(json_build_object(
                   'id', p.id,
                   'name', r.name || '#' || number,
                   'repo', r.name,
                   'number', number,
                   'state', p.state,
                   'source', source_id
               ))
        FROM runbot_merge_pull_requests p
        JOIN runbot_merge_repository r ON (r.id = p.repository)
        GROUP BY batch_id
    """)
    todos = []
    descendants = defaultdict(list)
    targets = {}
    batches = {}
    batch_prs = {}
    for batch, target_ids, prs in cr._obj:
        assert len(target_ids) == 1, \
            "Found batch with multiple targets {tnames} {prs}".format(
                tnames=', '.join(branch_names[id] for id in target_ids),
                prs=prs,
            )

        todos.append((batch, target_ids[0], prs))
        batch_prs[batch] = prs
        for pr in prs:
            pr['link'] = link(pr['name'], "https://mergebot.odoo.com/{repo}/pull/{number}".format_map(pr))

            targets[pr['id']] = target_ids[0]
            batches[pr['id']] = batch
            batches[pr['name']] = batch
            if pr['source']:
                descendants[pr['source']].append(pr['id'])
            else:
                # put source PRs as their own descendants otherwise the linkage
                # fails when trying to find the top-most parent
                descendants[pr['id']].append(pr['id'])
    assert None not in descendants

    for prs in chain(
        KNOWN_BATCHES,
        chain.from_iterable(WEIRD_SEQUENCES),
    ):
        batch_of_prs = {batches[f'odoo/{p}'] for p in prs}
        assert len(batch_of_prs) == 1,\
            "assumed {prs} were the same batch, got {batch_of_prs}".format(
                prs=', '.join(prs),
                batch_of_prs='; '.join(
                    '{} => {}'.format(p, batches[f'odoo/{p}'])
                    for p in prs
                )
            )

        prs_of_batch = {pr['name'].removeprefix('odoo/') for pr in batch_prs[batch_of_prs.pop()]}
        assert set(prs) == prs_of_batch,\
            "assumed batch would contain {prs}, got {prs_of_batch}".format(
                prs=', '.join(prs),
                prs_of_batch=', '.join(prs_of_batch),
            )

    parenting = []
    for batch, target, prs in todos:
        sources = [p['source'] for p in prs if p['source']]
        # can't have parent batch without source PRs
        if not sources:
            continue

        pid = projects[target]
        branches = branches_for_project[pid]

        # we need all the preceding targets in order to jump over disabled branches
        previous_targets = branches[branches.index(target) + 1:]
        if not previous_targets:
            continue

        for previous_target in previous_targets:
            # from each source, find the descendant targeting the earlier target,
            # then get the batch of these PRs
            parents = {
                batches[descendant]
                for source in sources
                for descendant in descendants[source]
                if targets[descendant] == previous_target
            }
            if parents:
                break
        else:
            continue

        if len(parents) == 2:
            parents1, parents2 = [batch_prs[parent] for parent in parents]
            # if all of one parent are merged and all of the other are not, take the merged side
            if all(p['state'] == 'merged' for p in parents1) and all(p['state'] != 'merged' for p in parents2):
                parents = [list(parents)[0]]
            elif all(p['state'] != 'merged' for p in parents1) and all(p['state'] == 'merged' for p in parents2):
                parents = [list(parents)[1]]
            elif len(parents1) == 1 and len(parents2) == 1 and len(prs) == 1:
                # if one of the candidates is older than the current PR
                # (lower id) and the other one younger, assume the first one is
                # correct
                p = min(parents, key=lambda p: batch_prs[p][0]['id'])
                low = batch_prs[p]
                high = batch_prs[max(parents, key=lambda p: batch_prs[p][0]['id'])]
                if low[0]['id'] < prs[0]['id'] < high[0]['id']:
                    parents = [p]

        if real_parents := SAAS_135_INSERTION_CONFUSION.get(tuple(sorted(parents))):
            parents = real_parents

        assert len(parents) == 1,\
            ("Found multiple candidates for batch {batch} ({prs})"
             " with target {target} (previous={previous_target})\n\t{parents}".format(
                parents="\n\t".join(
                    "{} ({})".format(
                        parent,
                        ", ".join(
                            f"{p['link']} ({p['state']}, {branch_names[targets[p['id']]]})"
                            for p in batch_prs[parent]
                        )
                    )
                    for parent in parents
                ),
                batch=batch,
                target=branch_names[target],
                previous_target=branch_names[previous_target],
                prs=', '.join(map("{link} ({state})".format_map, prs)),
            ))
        parenting.append((parents.pop(), batch))

    logger.info("set batch parenting...")
    # add column down here otherwise the FK constraint has to be verified for
    # each batch we try to delete and that is horrendously slow, deferring the
    # constraints is not awesome because we need to check it at the first DDL
    # and that's still way slower than feels necessary
    cr.execute("""
        ALTER TABLE runbot_merge_batch
            ADD COLUMN parent_id integer
            REFERENCES runbot_merge_batch(id)
    """)
    execute_batch(
        cr._obj,
        "UPDATE runbot_merge_batch SET parent_id = %s WHERE id = %s",
        parenting,
        page_size=1000,
    )

@dataclasses.dataclass(slots=True, kw_only=True)
class Cluster:
    merged_batch: int | None = None
    prs: set[int] = dataclasses.field(default_factory=set)
    batches: set[int] = dataclasses.field(default_factory=set)
    stagings: set[tuple[int, int]] = dataclasses.field(default_factory=set)
    "set of original (batch, staging) pairs"

@dataclasses.dataclass
class Clusters:
    clusters: list[Cluster] = dataclasses.field(default_factory=list)
    by_batch: dict[int, Cluster] = dataclasses.field(default_factory=dict)
    by_pr: dict[int, Cluster] = dataclasses.field(default_factory=dict)

@dataclasses.dataclass(slots=True, kw_only=True)
class Batch:
    staging: int | None = None
    merged: bool = False
    prs: set[int] = dataclasses.field(default_factory=set)

T = TypeVar('T')
def insert(s: set[T], v: T) -> bool:
    """Inserts v in s if not in, and returns whether an insertion was needed.
    """
    if v in s:
        return False
    else:
        s.add(v)
        return True
def collate_real_batches(cr: Any) -> tuple[Clusters, list[list[int]]]:
    cr.execute('''
        SELECT
            st.id as staging,
            st.state as staging_state,
            b.id as batch_id,
            p.id as pr_id
        FROM runbot_merge_batch_runbot_merge_pull_requests_rel br
        JOIN runbot_merge_batch b ON (b.id = br.runbot_merge_batch_id)
        JOIN runbot_merge_pull_requests as p ON (p.id = br.runbot_merge_pull_requests_id)
        LEFT JOIN runbot_merge_stagings st ON (st.id = b.staging_id)
    ''')
    batch_map: dict[int, Batch] = {}
    pr_to_batches = defaultdict(set)
    for staging_id, staging_state, batch_id, pr_id in cr.fetchall():
        pr_to_batches[pr_id].add(batch_id)

        if batch := batch_map.get(batch_id):
            batch.prs.add(pr_id)
        else:
            batch_map[batch_id] = Batch(
                staging=staging_id,
                merged=staging_state == 'success',
                prs={pr_id},
            )

    # maps a PR name to its id
    cr.execute("""
        SELECT r.name || '#' || p.number, p.id
        FROM runbot_merge_pull_requests p
        JOIN runbot_merge_repository r ON (r.id = p.repository)
        WHERE r.name || '#' || p.number = any(%s)
    """, [[f'odoo/{p}' for seq in WEIRD_SEQUENCES for b in seq if len(b) > 1 for p in b]])
    prmap: dict[str, int] = dict(cr._obj)
    to_batch = []
    # for each WEIRD_SEQUENCES batch, we need to merge their batches if any,
    # and create them otherwise
    for batch in (b for seq in WEIRD_SEQUENCES for b in seq if len(b) > 1):
        ids = [prmap[f'odoo/{n}'] for n in batch]
        batches = {b for pid in ids for b in pr_to_batches[pid]}
        if batches:
            for pid in ids:
                pr_to_batches[pid].update(batches)
            for bid in batches:
                batch_map[bid].prs.update(ids)
        else:
            # need to create a new batch
            to_batch.append(ids)

    clusters = Clusters()
    # we can start from either the PR or the batch side to reconstruct a cluster
    for pr_id in pr_to_batches:
        if pr_id in clusters.by_pr:
            continue

        to_visit = [pr_id]
        prs: set[int] = set()
        merged_batch = None
        batches: set[int] = set()
        stagings: set[tuple[int, int]] = set()
        while to_visit:
            pr_id = to_visit.pop()
            if not insert(prs, pr_id):
                continue

            for batch_id in pr_to_batches[pr_id]:
                if not insert(batches, batch_id):
                    continue

                b = batch_map[batch_id]
                if s := b.staging:
                    stagings.add((batch_id, s))
                if b.merged:
                    merged_batch = batch_id
                to_visit.extend(b.prs - prs)

        c = Cluster(merged_batch=merged_batch, prs=prs, batches=batches, stagings=stagings)
        clusters.clusters.append(c)
        clusters.by_batch.update((batch_id, c) for batch_id in c.batches)
        clusters.by_pr.update((pr_id, c) for pr_id in c.prs)

    return clusters, to_batch

# at the creation of saas 13.5, the forwardbot clearly got very confused and
# somehow did not correctly link the PRs it reinserted together, leading to
# some of them being merged separately, leading the batch parenting linker thing
# to be extremely confused
SAAS_135_INSERTION_CONFUSION = {
    (48200, 48237): [48237],
    (48353, 48388): [48353],
    (48571, 48602): [48602],
    (73614, 73841): [73614],
}

KNOWN_BATCHES = [
    # both closed, same source (should be trivial)
    ["odoo#151827", "enterprise#55453"],
    ["odoo#66743", "enterprise#16631"],

    # both closed but different sources
    ["odoo#57659", "enterprise#13204"],
    ["odoo#57752", "enterprise#13238"],
    ["odoo#94152", "enterprise#28664"],
    ["odoo#114059", "enterprise#37690"],
    ["odoo#152904", "enterprise#55975"],

    # one closed the other not, different sources (so a PR was added in the
    # middle of a forward port then its descendant was closed even though the
    # other repo / sequence kept on keeping)
    ["odoo#113422", "enterprise#37429"],
    ["odoo#151992", "enterprise#55501"],
    ["odoo#159211", "enterprise#59407"],

    # closed without a sibling but their source had a sibling
    ["odoo#67727"],  # enterprise closed at enterprise#16631
    ["odoo#70828"],  # enterprise closed at enterprise#17901
    ["odoo#132817"],  # enterprise closed at enterprise#44656
    ["odoo#137855"],  # enterprise closed at enterprise#48092
    ["enterprise#49430"],  # odoo closed at odoo#139515

    ["odoo#109811", "enterprise#35966"],
    ["odoo#110311", "enterprise#35983"],
    ["odoo#110576"],
]

# This is next level weird compared to the previous so it gets extra care:
# these are sequences with multiple points of divergence or grafting
WEIRD_SEQUENCES = [
    [
        ["odoo#40466"],
        ["odoo#40607"],
        ["odoo#40613", "odoo#41106"],
        ["odoo#40615", "odoo#41112"],
        ["odoo#40627", "odoo#41116", "odoo#41163"],
        ["odoo#40638", "odoo#41119", "odoo#41165"],
    ],
    [
        ["odoo#46405"],
        ["odoo#46698"],
        ["odoo#46820"],
        ["odoo#46974"],
        ["odoo#47273"],
        ["odoo#47345", "enterprise#9259"],
        ["odoo#47349", "odoo#47724", "enterprise#9274"],
    ],
    [
        ["odoo#47923"],
        ["odoo#47986"],
        ["odoo#47991", "odoo#48010"],
        ["odoo#47996", "odoo#48015", "odoo#48016"],
        ["odoo#48003"],
    ],
    [
        ["enterprise#9996"],
        ["enterprise#10062", "odoo#49828"],
        ["enterprise#10065", "odoo#49852", "enterprise#10076"],
        ["enterprise#10173", "odoo#50087"],
        ["enterprise#10179", "odoo#50104"],
        ["enterprise#10181", "odoo#50110"],
    ],
    [
        ["enterprise#16357"],
        ["enterprise#16371"],
        ["enterprise#16375", "enterprise#16381"],
        ["enterprise#16378", "enterprise#16385"],
        ["enterprise#16379", "enterprise#16390"],
    ],
    [
        ["odoo#55112"],
        ["odoo#55120"],
        ["odoo#55123", "odoo#55159"],
        ["odoo#55128", "odoo#55169"],
        ["odoo#55135", "odoo#55171"],
        ["odoo#55140", "odoo#55172"],
    ],
    [
        ["odoo#56254", "enterprise#12558"],
        ["odoo#56294", "enterprise#12564"],
        ["odoo#56300", "enterprise#12566"],
        ["odoo#56340", "enterprise#12589", "enterprise#12604"],
        ["odoo#56391", "enterprise#12608"],
    ],
    [
        ["enterprise#12565", "odoo#56299"],
        ["enterprise#12572", "odoo#56309", "odoo#56494"],
        ["enterprise#12660", "odoo#56518"],
        ["enterprise#12688", "odoo#56581"],
        ["enterprise#12691"],
    ],
    [
        ["odoo#64706"],
        ["odoo#65275"],
        ["odoo#65279", "odoo#65405"],
|
||||
["odoo#65489", "odoo#65491"],
|
||||
],
|
||||
[
|
||||
["odoo#66176"],
|
||||
["odoo#66188"],
|
||||
["odoo#66191"],
|
||||
["odoo#66194", "odoo#66226"],
|
||||
["odoo#66200", "odoo#66229", "odoo#66277"],
|
||||
["odoo#66204", "odoo#66232", "odoo#66283"],
|
||||
["odoo#66208", "odoo#66234", "odoo#66285", "odoo#66303"],
|
||||
],
|
||||
[
|
||||
["enterprise#22089", "odoo#79348"],
|
||||
["enterprise#26736", "odoo#90050"],
|
||||
["enterprise#31822", "odoo#101218", "odoo#106002"],
|
||||
["enterprise#36014", "odoo#110369", "odoo#113892"],
|
||||
["enterprise#37690", "odoo#114059"],
|
||||
],
|
||||
]
|
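For context: the cluster reconstruction above is a plain connected-components walk over the bipartite PR-to-batch graph. A minimal, self-contained sketch of the same traversal, on hypothetical toy data rather than the real schema:

from collections import defaultdict

# hypothetical toy graph: PR id -> linked batch ids (illustration only)
pr_to_batches = {1: {10}, 2: {10, 11}, 3: {12}}
batch_to_prs = defaultdict(set)
for pr, batches in pr_to_batches.items():
    for b in batches:
        batch_to_prs[b].add(pr)

def cluster_of(start_pr: int) -> tuple[set[int], set[int]]:
    """Collects every PR and batch transitively linked to start_pr."""
    prs: set[int] = set()
    batches: set[int] = set()
    stack = [start_pr]
    while stack:
        pr = stack.pop()
        if pr in prs:
            continue
        prs.add(pr)
        for b in pr_to_batches[pr]:
            if b not in batches:
                batches.add(b)
                stack.extend(batch_to_prs[b] - prs)
    return prs, batches

assert cluster_of(1) == ({1, 2}, {10, 11})  # PRs 1 and 2 share batch 10
assert cluster_of(3) == ({3}, {12})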
@ -2,6 +2,7 @@ from . import ir_actions
from . import res_partner
from . import project
from . import pull_requests
from . import batch
from . import project_freeze
from . import stagings_create
from . import staging_cancel
runbot_merge/models/batch.py (new file, 533 lines)
@ -0,0 +1,533 @@
from __future__ import annotations

import base64
import contextlib
import logging
import os
import re
from collections import defaultdict
from collections.abc import Iterator

import requests
from psycopg2 import sql

from odoo import models, fields, api
from .utils import enum


_logger = logging.getLogger(__name__)
FOOTER = '\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n'


class StagingBatch(models.Model):
    _name = 'runbot_merge.staging.batch'
    _description = "link between batches and staging in order to maintain an " \
                   "ordering relationship between the batches of a staging"
    _log_access = False
    _order = 'id'

    runbot_merge_batch_id = fields.Many2one('runbot_merge.batch', required=True)
    runbot_merge_stagings_id = fields.Many2one('runbot_merge.stagings', required=True)

    def init(self):
        super().init()

        self.env.cr.execute(sql.SQL("""
        CREATE UNIQUE INDEX IF NOT EXISTS runbot_merge_staging_batch_idx
            ON {table} (runbot_merge_stagings_id, runbot_merge_batch_id);

        CREATE INDEX IF NOT EXISTS runbot_merge_staging_batch_rev
            ON {table} (runbot_merge_batch_id) INCLUDE (runbot_merge_stagings_id);
        """).format(table=sql.Identifier(self._table)))


class Batch(models.Model):
    """ A batch is a "horizontal" grouping of *codependent* PRs: PRs with
    the same label & target but for different repositories. These are
    assumed to be part of the same "change" smeared over multiple
    repositories e.g. change an API in repo1, this breaks use of that API
    in repo2 which now needs to be updated.
    """
    _name = 'runbot_merge.batch'
    _description = "batch of pull requests"
    _inherit = ['mail.thread']
    _parent_store = True

    name = fields.Char(compute="_compute_name")
    target = fields.Many2one('runbot_merge.branch', store=True, compute='_compute_target')
    batch_staging_ids = fields.One2many('runbot_merge.staging.batch', 'runbot_merge_batch_id')
    staging_ids = fields.Many2many(
        'runbot_merge.stagings',
        compute="_compute_staging_ids",
        context={'active_test': False},
    )
    split_id = fields.Many2one('runbot_merge.split', index=True)

    all_prs = fields.One2many('runbot_merge.pull_requests', 'batch_id')
    prs = fields.One2many('runbot_merge.pull_requests', compute='_compute_open_prs', search='_search_open_prs')
    active = fields.Boolean(compute='_compute_active', store=True, help="closed batches (batches containing only closed PRs)")

    fw_policy = fields.Selection([
        ('default', "Default"),
        ('skipci', "Skip CI"),
    ], required=True, default="default", string="Forward Port Policy")

    merge_date = fields.Datetime(tracking=True)
    # having skipchecks skip both validation *and approval* makes sense because
    # it's batch-wise, having to approve individual PRs is annoying
    skipchecks = fields.Boolean(
        string="Skips Checks",
        default=False, tracking=True,
        help="Forces entire batch to be ready, skips validation and approval",
    )
    cancel_staging = fields.Boolean(
        string="Cancels Stagings",
        default=False, tracking=True,
        help="Cancels current staging on target branch when becoming ready",
    )
    priority = fields.Selection([
        ('default', "Default"),
        ('priority', "Priority"),
        ('alone', "Alone"),
    ], default='default', group_operator=None, required=True,
        column_type=enum(_name, 'priority'),
    )

    blocked = fields.Char(store=True, compute="_compute_stageable")

    # unlike on PRs, this does not get detached... ? (because batches can be
    # partially detached so that's a PR-level concern)
    parent_path = fields.Char(index=True)
    parent_id = fields.Many2one("runbot_merge.batch")
    genealogy_ids = fields.Many2many(
        "runbot_merge.batch",
        compute="_compute_genealogy",
        context={"active_test": False},
    )

    @api.depends('batch_staging_ids.runbot_merge_stagings_id')
    def _compute_staging_ids(self):
        for batch in self:
            batch.staging_ids = batch.batch_staging_ids.runbot_merge_stagings_id

    @property
    def source(self):
        return self.browse(map(int, self.parent_path.split('/', 1)[:1]))

    def descendants(self, include_self: bool = False) -> Iterator[Batch]:
        # in DB both will prefix-match on the literal prefix then apply a
        # trivial filter (even though the filter is technically unnecessary for
        # the first form), doing it like this means we don't have to `- self`
        # in the ``not include_self`` case
        if include_self:
            pattern = self.parent_path + '%'
        else:
            pattern = self.parent_path + '_%'

        act = self.env.context.get('active_test', True)
        return self\
            .with_context(active_test=False)\
            .search([("parent_path", '=like', pattern)], order="parent_path")\
            .with_context(active_test=act)

    # also depends on all the descendants of the source or sth
    @api.depends('parent_path')
    def _compute_genealogy(self):
        for batch in self:
            sid = next(iter(batch.parent_path.split('/', 1)))
            batch.genealogy_ids = self \
                .with_context(active_test=False)\
                .search([("parent_path", "=like", f"{sid}/%")], order="parent_path")

    def _auto_init(self):
        for field in self._fields.values():
            if not isinstance(field, fields.Selection) or field.column_type[0] == 'varchar':
                continue

            t = field.column_type[1]
            self.env.cr.execute("SELECT FROM pg_type WHERE typname = %s", [t])
            if not self.env.cr.rowcount:
                self.env.cr.execute(
                    f"CREATE TYPE {t} AS ENUM %s",
                    [tuple(s for s, _ in field.selection)]
                )

        super()._auto_init()

        self.env.cr.execute("""
        CREATE INDEX IF NOT EXISTS runbot_merge_batch_ready_idx
            ON runbot_merge_batch (target, priority)
            WHERE blocked IS NULL;

        CREATE INDEX IF NOT EXISTS runbot_merge_batch_parent_id_idx
            ON runbot_merge_batch (parent_id)
            WHERE parent_id IS NOT NULL;
        """)

    @api.depends('all_prs.closed')
    def _compute_active(self):
        for b in self:
            b.active = not all(p.closed for p in b.all_prs)

    @api.depends('all_prs.closed')
    def _compute_open_prs(self):
        for b in self:
            b.prs = b.all_prs.filtered(lambda p: not p.closed)

    def _search_open_prs(self, operator, value):
        return [('all_prs', operator, value), ('active', '=', True)]

    @api.depends("prs.label")
    def _compute_name(self):
        for batch in self:
            batch.name = batch.prs[:1].label or batch.all_prs[:1].label

    @api.depends("all_prs.target")
    def _compute_target(self):
        for batch in self:
            if len(batch.prs) == 1:
                batch.target = batch.all_prs.target
            else:
                targets = set(batch.prs.mapped('target'))
                if not targets:
                    targets = set(batch.all_prs.mapped('target'))
                if len(targets) == 1:
                    batch.target = targets.pop()
                else:
                    batch.target = False

    @api.depends(
        "merge_date",
        "prs.error", "prs.draft", "prs.squash", "prs.merge_method",
        "skipchecks",
        "prs.status", "prs.reviewed_by", "prs.target",
    )
    def _compute_stageable(self):
        for batch in self:
            if batch.merge_date:
                batch.blocked = "Merged."
            elif not batch.active:
                batch.blocked = "all prs are closed"
            elif blocking := batch.prs.filtered(
                lambda p: p.error or p.draft or not (p.squash or p.merge_method)
            ):
                batch.blocked = "Pull request(s) %s blocked." % ', '.join(blocking.mapped('display_name'))
            elif not batch.skipchecks and (unready := batch.prs.filtered(
                lambda p: not (p.reviewed_by and p.status == "success")
            )):
                unreviewed = ', '.join(unready.filtered(lambda p: not p.reviewed_by).mapped('display_name'))
                unvalidated = ', '.join(unready.filtered(lambda p: p.status == 'pending').mapped('display_name'))
                failed = ', '.join(unready.filtered(lambda p: p.status == 'failure').mapped('display_name'))
                batch.blocked = "Pull request(s) %s." % ', '.join(filter(None, [
                    unreviewed and f"{unreviewed} are waiting for review",
                    unvalidated and f"{unvalidated} are waiting for CI",
                    failed and f"{failed} have failed CI",
                ]))
            elif len(targets := batch.prs.mapped('target')) > 1:
                batch.blocked = f"Multiple target branches: {', '.join(targets.mapped('name'))!r}"
            else:
                if batch.blocked and batch.cancel_staging:
                    batch.target.active_staging_id.cancel(
                        'unstaged by %s on %s (%s)',
                        self.env.user.login,
                        batch,
                        ', '.join(batch.prs.mapped('display_name')),
                    )
                batch.blocked = False


    def _port_forward(self):
        if not self:
            return

        proj = self.target.project_id
        if not proj.fp_github_token:
            _logger.warning(
                "Can not forward-port %s (%s): no token on project %s",
                self,
                ', '.join(self.prs.mapped('display_name')),
                proj.name
            )
            return

        notarget = [r.name for r in self.prs.repository if not r.fp_remote_target]
        if notarget:
            _logger.error(
                "Can not forward-port %s (%s): repos %s don't have a forward port remote configured",
                self,
                ', '.join(self.prs.mapped('display_name')),
                ', '.join(notarget),
            )
            return

        all_sources = [(p.source_id or p) for p in self.prs]
        all_targets = [p._find_next_target() for p in self.prs]

        if all(t is None for t in all_targets):
            # TODO: maybe add a feedback message?
            _logger.info(
                "Will not forward port %s (%s): no next target",
                self,
                ', '.join(self.prs.mapped('display_name'))
            )
            return

        PRs = self.env['runbot_merge.pull_requests']
        targets = defaultdict(lambda: PRs)
        for p, t in zip(self.prs, all_targets):
            if t:
                targets[t] |= p
            else:
                _logger.info("Skip forward porting %s (of %s): no next target", p.display_name, self)

        # all the PRs *with a next target* should have the same, we can have PRs
        # stopping forward port earlier but skipping... probably not
        if len(targets) != 1:
            for t, prs in targets.items():
                linked, other = next((
                    (linked, other)
                    for other, linkeds in targets.items()
                    if other != t
                    for linked in linkeds
                ))
                for pr in prs:
                    self.env.ref('runbot_merge.forwardport.failure.discrepancy')._send(
                        repository=pr.repository,
                        pull_request=pr.number,
                        token_field='fp_github_token',
                        format_args={'pr': pr, 'linked': linked, 'next': t.name, 'other': other.name},
                    )
            _logger.warning(
                "Cancelling forward-port of %s (%s): found different next branches (%s)",
                self,
                ', '.join(self.prs.mapped('display_name')),
                ', '.join(t.name for t in targets),
            )
            return

        target, prs = next(iter(targets.items()))
        # this is run by the cron, no need to check if otherwise scheduled:
        # either the scheduled job is this one, or it's an other scheduling
        # which will run after this one and will see the port already exists
        if self.search_count([('parent_id', '=', self.id), ('target', '=', target.id)]):
            _logger.warning(
                "Will not forward-port %s (%s): already ported",
                self,
                ', '.join(prs.mapped('display_name'))
            )
            return

        # the base PR is the PR with the "oldest" target
        base = max(all_sources, key=lambda p: (p.target.sequence, p.target.name))
        # take only the branch bit
        new_branch = '%s-%s-%s-fw' % (
            target.name,
            base.refname,
            # avoid collisions between fp branches (labels can be reused
            # or conflict especially as we're chopping off the owner)
            base64.urlsafe_b64encode(os.urandom(3)).decode()
        )
        conflicts = {}
        with contextlib.ExitStack() as s:
            for pr in prs:
                conflicts[pr], working_copy = pr._create_fp_branch(
                    target, new_branch, s)

                working_copy.push('target', new_branch)

        gh = requests.Session()
        gh.headers['Authorization'] = 'token %s' % proj.fp_github_token
        has_conflicts = any(conflicts.values())
        # could create a batch here but then we'd have to update `_from_gh` to
        # take a batch and then `create` to not automatically resolve batches,
        # easier to not do that.
        new_batch = PRs.browse(())
        self.env.cr.execute('LOCK runbot_merge_pull_requests IN SHARE MODE')
        for pr in prs:
            owner, _ = pr.repository.fp_remote_target.split('/', 1)
            source = pr.source_id or pr
            root = pr.root_id

            message = source.message + '\n\n' + '\n'.join(
                "Forward-Port-Of: %s" % p.display_name
                for p in root | source
            )

            title, body = re.match(r'(?P<title>[^\n]+)\n*(?P<body>.*)', message, flags=re.DOTALL).groups()
            r = gh.post(f'https://api.github.com/repos/{pr.repository.name}/pulls', json={
                'base': target.name,
                'head': f'{owner}:{new_branch}',
                'title': '[FW]' + (' ' if title[0] != '[' else '') + title,
                'body': body
            })
            if not r.ok:
                _logger.warning("Failed to create forward-port PR for %s, deleting branches", pr.display_name)
                # delete all the branches this should automatically close the
                # PRs if we've created any. Using the API here is probably
                # simpler than going through the working copies
                for repo in prs.mapped('repository'):
                    d = gh.delete(f'https://api.github.com/repos/{repo.fp_remote_target}/git/refs/heads/{new_branch}')
                    if d.ok:
                        _logger.info("Deleting %s:%s=success", repo.fp_remote_target, new_branch)
                    else:
                        _logger.warning("Deleting %s:%s=%s", repo.fp_remote_target, new_branch, d.text)
                raise RuntimeError(f"Forwardport failure: {pr.display_name} ({r.text})")

            new_pr = PRs._from_gh(r.json())
            _logger.info("Created forward-port PR %s", new_pr)
            new_batch |= new_pr

            # allows PR author to close or skipci
            new_pr.write({
                'merge_method': pr.merge_method,
                'source_id': source.id,
                # only link to previous PR of sequence if cherrypick passed
                'parent_id': pr.id if not has_conflicts else False,
                'detach_reason': "conflicts:\n{}".format('\n\n'.join(
                    f"{out}\n{err}".strip()
                    for _, out, err, _ in filter(None, conflicts.values())
                )) if has_conflicts else None,
            })
            if has_conflicts and pr.parent_id and pr.state not in ('merged', 'closed'):
                self.env.ref('runbot_merge.forwardport.failure.conflict')._send(
                    repository=pr.repository,
                    pull_request=pr.number,
                    token_field='fp_github_token',
                    format_args={'source': source, 'pr': pr, 'new': new_pr, 'footer': FOOTER},
                )

        for pr, new_pr in zip(prs, new_batch):
            new_pr._fp_conflict_feedback(pr, conflicts)

            labels = ['forwardport']
            if has_conflicts:
                labels.append('conflict')
            self.env['runbot_merge.pull_requests.tagging'].create({
                'repository': new_pr.repository.id,
                'pull_request': new_pr.number,
                'tags_add': labels,
            })

        new_batch = new_batch.batch_id
        new_batch.parent_id = self
        # try to schedule followup
        new_batch._schedule_fp_followup()
        return new_batch

    def _schedule_fp_followup(self):
        _logger = logging.getLogger(__name__).getChild('forwardport.next')
        # if the PR has a parent and is CI-validated, enqueue the next PR
        scheduled = self.browse(())
        for batch in self:
            prs = ', '.join(batch.prs.mapped('display_name'))
            _logger.info('Checking if forward-port %s (%s)', batch, prs)
            # in case of conflict or update individual PRs will "lose" their
            # parent, which should prevent forward porting
            if not (batch.parent_id and all(p.parent_id for p in batch.prs)):
                _logger.info('-> no parent %s (%s)', batch, prs)
                continue
            if not self.env.context.get('force_fw') and batch.source.fw_policy != 'skipci' \
                    and (invalid := batch.prs.filtered(lambda p: p.state not in ['validated', 'ready'])):
                _logger.info(
                    '-> wrong state %s (%s)',
                    batch,
                    ', '.join(f"{p.display_name}: {p.state}" for p in invalid),
                )
                continue

            # check if we've already forward-ported this branch
            next_target = batch._find_next_targets()
            if not next_target:
                _logger.info("-> forward port done (no next target)")
                continue
            if len(next_target) > 1:
                _logger.error(
                    "-> cancelling forward-port of %s (%s): inconsistent next target branch (%s)",
                    batch,
                    prs,
                    ', '.join(next_target.mapped('name')),
                )
                continue

            if n := self.search([
                ('target', '=', next_target.id),
                ('parent_id', '=', batch.id),
            ], limit=1):
                _logger.info('-> already forward-ported (%s)', n)
                continue

            _logger.info("check pending port for %s (%s)", batch, prs)
            if self.env['forwardport.batches'].search_count([('batch_id', '=', batch.id)]):
                _logger.warning('-> already recorded')
                continue

            _logger.info('-> ok')
            self.env['forwardport.batches'].create({
                'batch_id': batch.id,
                'source': 'fp',
            })
            scheduled |= batch
        return scheduled

    def _find_next_target(self):
        """Retrieves the next target from every PR, and returns it if it's the
        same for all the PRs which have one (PRs without a next target are
        ignored, this is considered acceptable).

        If the next targets are inconsistent, returns no next target.
        """
        next_target = self._find_next_targets()
        if len(next_target) == 1:
            return next_target
        else:
            return self.env['runbot_merge.branch'].browse(())

    def _find_next_targets(self):
        return self.prs.mapped(lambda p: p._find_next_target() or self.env['runbot_merge.branch'])

    def write(self, vals):
        if vals.get('merge_date'):
            # TODO: remove condition when everything is merged
            remover = self.env.get('forwardport.branch_remover')
            if remover is not None:
                remover.create([
                    {'pr_id': p.id}
                    for b in self
                    if not b.merge_date
                    for p in b.prs
                ])

        if vals.get('fw_policy') == 'skipci':
            nonskip = self.filtered(lambda b: b.fw_policy != 'skipci')
        else:
            nonskip = self.browse(())
        super().write(vals)

        # if we change the policy to skip CI, schedule followups on merged
        # batches which were not previously marked as skipping CI
        if nonskip:
            toggled = nonskip.filtered(lambda b: b.merge_date)
            tips = toggled.mapped(lambda b: b.genealogy_ids[-1:])
            for tip in tips:
                tip._schedule_fp_followup()

        return True

    @api.ondelete(at_uninstall=True)
    def _on_delete_clear_stagings(self):
        self.batch_staging_ids.unlink()

    def unlink(self):
        """
        batches can be unlinked if they:

        - have run out of PRs
        - and don't have a parent batch (which is not being deleted)
        - and don't have a child batch (which is not being deleted)

        this is to keep track of forward port histories at the batch level
        """
        unlinkable = self.filtered(
            lambda b: not (b.prs or (b.parent_id - self) or (self.search([('parent_id', '=', b.id)]) - self))
        )
        return super(Batch, unlinkable).unlink()
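A note on the genealogy bits above: `_parent_store` maintains a materialized path in `parent_path` (e.g. "12/34/56/"), and `descendants` / `_compute_genealogy` are just prefix matches on it. A small sketch of the same matching outside the ORM, on hypothetical paths:

from typing import List

# hypothetical materialized paths, as _parent_store would store them
paths = {
    12: '12/',
    34: '12/34/',
    56: '12/34/56/',
    78: '78/',
}

def descendants(batch_id: int, include_self: bool = False) -> List[int]:
    # prefix match on parent_path, mirroring the '=like' patterns above
    prefix = paths[batch_id]
    return [
        bid for bid, p in paths.items()
        if p.startswith(prefix) and (include_self or bid != batch_id)
    ]

assert descendants(12) == [34, 56]
assert descendants(12, include_self=True) == [12, 34, 56]
assert descendants(78) == []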
runbot_merge/models/commands.py (new file, 310 lines)
@ -0,0 +1,310 @@
import enum
from collections.abc import Iterator
from dataclasses import dataclass, field
from functools import partial
from operator import contains
from typing import Callable, List, Optional, Union


def tokenize(line: str) -> Iterator[str]:
    cur = ''
    for c in line:
        if c == '-' and not cur:
            yield '-'
        elif c in ' \t+=,':
            if cur:
                yield cur
                cur = ''
            if not c.isspace():
                yield c
        else:
            cur += c

    if cur:
        yield cur


def normalize(it: Iterator[str]) -> Iterator[str]:
    """Converts shorthand tokens to expanded version
    """
    for t in it:
        match t:
            case 'r':
                yield 'review'
            case 'r-':
                yield 'review'
                yield '-'
            case _:
                yield t


@dataclass
class Peekable(Iterator[str]):
    it: Iterator[str]
    memo: Optional[str] = None

    def __iter__(self) -> Iterator[str]:
        return self

    def __next__(self) -> str:
        if self.memo is not None:
            v, self.memo = self.memo, None
            return v
        return next(self.it)

    def peek(self) -> Optional[str]:
        if self.memo is None:
            self.memo = next(self.it, None)
        return self.memo


class CommandError(Exception):
    pass


class Approve:
    def __init__(self, ids: Optional[List[int]] = None) -> None:
        self.ids = ids

    def __str__(self) -> str:
        if self.ids is not None:
            ids = ','.join(map(str, self.ids))
            return f"r={ids}"
        return 'review+'


class Reject:
    def __str__(self) -> str:
        return 'review-'


class MergeMethod(enum.Enum):
    SQUASH = 'squash'
    REBASE_FF = 'rebase-ff'
    REBASE_MERGE = 'rebase-merge'
    MERGE = 'merge'

    def __str__(self) -> str:
        return self.value


class Retry:
    def __str__(self) -> str:
        return 'retry'


class Check:
    def __str__(self) -> str:
        return 'check'


@dataclass
class Override:
    statuses: List[str] = field(default_factory=list)

    def __str__(self) -> str:
        return f"override={','.join(self.statuses)}"


@dataclass
class Delegate:
    users: List[str] = field(default_factory=list)

    def __str__(self) -> str:
        if not self.users:
            return 'delegate+'
        return f"delegate={','.join(self.users)}"


class Priority(enum.Enum):
    DEFAULT = enum.auto()
    PRIORITY = enum.auto()
    ALONE = enum.auto()

    def __str__(self) -> str:
        return self.name.lower()


class CancelStaging:
    def __str__(self) -> str:
        return "cancel=staging"


class SkipChecks:
    def __str__(self) -> str:
        return 'skipchecks'


class FW(enum.Enum):
    DEFAULT = enum.auto()
    SKIPCI = enum.auto()
    SKIPMERGE = enum.auto()

    def __str__(self) -> str:
        return f'fw={self.name.lower()}'


@dataclass
class Limit:
    branch: Optional[str]

    def __str__(self) -> str:
        if self.branch is None:
            return 'ignore'
        return f'up to {self.branch}'


class Close:
    def __str__(self) -> str:
        return 'close'


Command = Union[
    Approve,
    CancelStaging,
    Close,
    Check,
    Delegate,
    FW,
    Limit,
    MergeMethod,
    Override,
    Priority,
    Reject,
    Retry,
    SkipChecks,
]


class Parser:
    def __init__(self, line: str) -> None:
        self.it = Peekable(normalize(tokenize(line)))

    def __iter__(self) -> Iterator[Command]:
        for token in self.it:
            if token.startswith("NOW"):
                # any number of ! is allowed
                if token.startswith("NOW!"):
                    yield Priority.ALONE
                elif token == "NOW":
                    yield Priority.PRIORITY
                else:
                    raise CommandError(f"unknown command {token!r}")
                yield SkipChecks()
                yield CancelStaging()
                continue

            handler = getattr(type(self), f'parse_{token.replace("-", "_")}', None)
            if handler:
                yield handler(self)
            elif '!' in token:
                raise CommandError("skill issue, noob")
            else:
                raise CommandError(f"unknown command {token!r}")

    def assert_next(self, val: str) -> None:
        if (actual := next(self.it, None)) != val:
            raise CommandError(f"expected {val!r}, got {actual!r}")

    def check_next(self, val: str) -> bool:
        if self.it.peek() == val:
            self.it.memo = None # consume peeked value
            return True
        return False

    def parse_review(self) -> Union[Approve, Reject]:
        t = next(self.it, None)
        if t == '+':
            return Approve()
        if t == '-':
            return Reject()
        if t == '=':
            t = next(self.it, None)
            if not (t and t.isdecimal()):
                raise CommandError(f"expected PR ID to approve, found {t!r}")

            ids = [int(t)]
            while self.check_next(','):
                id = next(self.it, None)
                if id and id.isdecimal():
                    ids.append(int(id))
                else:
                    raise CommandError(f"expected PR ID to approve, found {id!r}")
            return Approve(ids)

        raise CommandError(f"unknown review {t!r}")

    def parse_squash(self) -> MergeMethod:
        return MergeMethod.SQUASH

    def parse_rebase_ff(self) -> MergeMethod:
        return MergeMethod.REBASE_FF

    def parse_rebase_merge(self) -> MergeMethod:
        return MergeMethod.REBASE_MERGE

    def parse_merge(self) -> MergeMethod:
        return MergeMethod.MERGE

    def parse_retry(self) -> Retry:
        return Retry()

    def parse_check(self) -> Check:
        return Check()

    def parse_override(self) -> Override:
        self.assert_next('=')
        ci = [next(self.it)]
        while self.check_next(','):
            ci.append(next(self.it))
        return Override(ci)

    def parse_delegate(self) -> Delegate:
        match next(self.it, None):
            case '+':
                return Delegate()
            case '=':
                delegates = [next(self.it).lstrip('#@')]
                while self.check_next(','):
                    delegates.append(next(self.it).lstrip('#@'))
                return Delegate(delegates)
            case d:
                raise CommandError(f"unknown delegation {d!r}")

    def parse_default(self) -> Priority:
        return Priority.DEFAULT

    def parse_priority(self) -> Priority:
        return Priority.PRIORITY

    def parse_alone(self) -> Priority:
        return Priority.ALONE

    def parse_cancel(self) -> CancelStaging:
        self.assert_next('=')
        self.assert_next('staging')
        return CancelStaging()

    def parse_skipchecks(self) -> SkipChecks:
        return SkipChecks()

    def parse_fw(self) -> FW:
        self.assert_next('=')
        f = next(self.it, "")
        try:
            return FW[f.upper()]
        except KeyError:
            raise CommandError(f"unknown fw configuration {f or None!r}") from None

    def parse_ignore(self) -> Limit:
        return Limit(None)

    def parse_up(self) -> Limit:
        self.assert_next('to')
        if limit := next(self.it, None):
            return Limit(limit)
        else:
            raise CommandError("please provide a branch to forward-port to.")

    def parse_close(self) -> Close:
        return Close()
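To make the parsing flow concrete, here is a quick sketch of feeding a command line through the parser above (the printed forms come from each command's __str__):

# sketch: round-tripping a command line through the Parser defined above
line = "r+ rebase-merge fw=skipci up to 16.0"
print([str(cmd) for cmd in Parser(line)])
# -> ['review+', 'rebase-merge', 'fw=skipci', 'up to 16.0']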
@ -1,11 +1,14 @@
import logging
import re
from typing import List

import requests
import sentry_sdk

from odoo import models, fields, api
from odoo.exceptions import UserError
from odoo.osv import expression
from odoo.tools import reverse_order

_logger = logging.getLogger(__name__)
class Project(models.Model):
@ -23,6 +26,12 @@ class Project(models.Model):
        help="Branches of all project's repos which are managed by the merge bot. Also "\
             "target branches of PR this project handles."
    )
    staging_enabled = fields.Boolean(default=True)
    staging_priority = fields.Selection([
        ('default', "Splits over ready PRs"),
        ('largest', "Largest of split and ready PRs"),
        ('ready', "Ready PRs over split"),
    ], default="default", required=True)

    ci_timeout = fields.Integer(
        default=60, required=True, group_operator=None,
@ -36,8 +45,10 @@ class Project(models.Model):
        required=True,
        default="hanson", # mergebot du bot du bot du~
        help="Prefix (~bot name) used when sending commands from PR "
             "comments e.g. [hanson retry] or [hanson r+ p=1]",
             "comments e.g. [hanson retry] or [hanson r+ priority]",
    )
    fp_github_token = fields.Char()
    fp_github_name = fields.Char(store=True, compute="_compute_git_identity")

    batch_limit = fields.Integer(
        default=8, group_operator=None, help="Maximum number of PRs staged together")
@ -96,6 +107,24 @@ class Project(models.Model):
        if not project.github_email:
            raise UserError("The merge bot needs a public or accessible primary email set up.")

    # technically the email could change at any moment...
    @api.depends('fp_github_token')
    def _compute_git_identity(self):
        s = requests.Session()
        for project in self:
            if project.fp_github_name or not project.fp_github_token:
                continue

            r0 = s.get('https://api.github.com/user', headers={
                'Authorization': 'token %s' % project.fp_github_token
            })
            if not r0.ok:
                _logger.error("Failed to fetch forward bot information for project %s: %s", project.name, r0.text or r0.content)
                continue

            user = r0.json()
            project.fp_github_name = user['name'] or user['login']

    def _check_stagings(self, commit=False):
        # check branches with an active staging
        for branch in self.env['runbot_merge.branch']\
@ -120,6 +149,7 @@ class Project(models.Model):
            ('active_staging_id', '=', False),
            ('active', '=', True),
            ('staging_enabled', '=', True),
            ('project_id.staging_enabled', '=', True),
        ]):
            try:
                with self.env.cr.savepoint(), \
@ -132,9 +162,17 @@ class Project(models.Model):
            if commit:
                self.env.cr.commit()

    def _find_commands(self, comment):
    def _find_commands(self, comment: str) -> List[str]:
        """Tries to find all the lines starting (ignoring leading whitespace)
        with either the merge or the forward port bot identifiers.

        For convenience, the identifier *can* be prefixed with an ``@`` or
        ``#``, and suffixed with a ``:``.
        """
        # horizontal whitespace (\s - {\n, \r}), but Python doesn't have \h or \p{Blank}
        h = r'[^\S\r\n]'
        return re.findall(
            '^\s*[@|#]?{}:? (.*)$'.format(self.github_prefix),
            fr'^{h}*[@|#]?{self.github_prefix}(?:{h}+|:{h}*)(.*)$',
            comment, re.MULTILINE | re.IGNORECASE)

    def _has_branch(self, name):
@ -181,3 +219,10 @@ class Project(models.Model):
            ]
        })
        return w.action_open()

    def _forward_port_ordered(self, domain=()):
        Branches = self.env['runbot_merge.branch']
        return Branches.search(expression.AND([
            [('project_id', '=', self.id)],
            domain or [],
        ]), order=reverse_order(Branches._order))
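For reference, a minimal sketch of what the new _find_commands regex matches, assuming a bot prefix of "hanson" (not the actual model code):

import re

h = r'[^\S\r\n]'  # horizontal whitespace only, \n and \r excluded
prefix = 'hanson'
pattern = fr'^{h}*[@|#]?{prefix}(?:{h}+|:{h}*)(.*)$'

comment = """
Some discussion.
@hanson r+ rebase-merge
  #hanson: fw=skipci
hanson
"""
print(re.findall(pattern, comment, re.MULTILINE | re.IGNORECASE))
# -> ['r+ rebase-merge', 'fw=skipci']; the bare "hanson" line does not match
# because the prefix must be followed by whitespace or a colon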
@ -218,7 +218,8 @@ class FreezeWizard(models.Model):
        }
        for repo, copy in repos.items():
            copy.fetch(git.source_url(repo, 'github'), '+refs/heads/*:refs/heads/*')
        for pr in self.release_pr_ids.pr_id | self.bump_pr_ids.pr_id:
        all_prs = self.release_pr_ids.pr_id | self.bump_pr_ids.pr_id
        for pr in all_prs:
            repos[pr.repository].fetch(
                git.source_url(pr.repository, 'github'),
                pr.head,
@ -266,6 +267,11 @@ class FreezeWizard(models.Model):
                bump.pr_id.display_name, prev, len(commits))
            bump_heads[repo_id] = repos[repo_id].rebase(prev, commits)[0]

        # prevent concurrent updates to the commits table so we control the
        # creation of commit objects from rebasing the release & bump PRs, do it
        # only just before *pushing*
        self.env.cr.execute("LOCK runbot_merge_commit IN ACCESS EXCLUSIVE MODE NOWAIT")

        deployed = {}
        # at this point we've got a bunch of tmp branches with merged release
        # and bump PRs, it's time to update the corresponding targets
@ -338,8 +344,82 @@ class FreezeWizard(models.Model):
                f"Unable to {reason} branch {repo}:{branch}.{addendum}"
            )

        all_prs = self.release_pr_ids.pr_id | self.bump_pr_ids.pr_id
        all_prs.state = 'merged'
        b = self.env['runbot_merge.branch'].search([('name', '=', self.branch_name)])
        self.env.cr.execute(
            "UPDATE runbot_merge_batch SET target=%s WHERE id = %s;"
            "UPDATE runbot_merge_pull_requests SET target=%s WHERE id = any(%s)",
            [
                b.id, self.release_pr_ids.pr_id.batch_id.id,
                b.id, self.release_pr_ids.pr_id.ids,
            ]
        )
        all_prs.batch_id.merge_date = fields.Datetime.now()
        all_prs.reviewed_by = self.env.user.partner_id.id
        for p in all_prs:
            p.commits_map = json.dumps({
                '': deployed[p.id],
                p.head: deployed[p.id]
            })

        # stagings have to be created conditionally as otherwise we might not
        # have a `target` to set and it's mandatory
        laster = self.env['runbot_merge.stagings'].search(
            [('target', '=', master.id), ('state', '=', 'success')],
            order='id desc',
            limit=1,
        ).commits.mapped(lambda c: (c.repository_id, c.commit_id))
        if self.release_pr_ids:
            rel_items = [(0, 0, {
                'repository_id': repo.id,
                'commit_id': self.env['runbot_merge.commit'].create({
                    'sha': sha,
                    'to_check': False,
                }).id,
            } if (sha := rel_heads.get(repo)) else {
                'repository_id': repo.id,
                'commit_id': commit.id,
            })
                for repo, commit in laster
            ]
            self.env['runbot_merge.stagings'].create([{
                'state': 'success',
                'reason': 'release freeze staging',
                'active': False,
                'target': b.id,
                'staging_batch_ids': [
                    (0, 0, {'runbot_merge_batch_id': batch.id})
                    for batch in self.release_pr_ids.pr_id.batch_id
                ],
                'heads': rel_items,
                'commits': rel_items,
            }])

        if self.bump_pr_ids:
            bump_items = [(0, 0, {
                'repository_id': repo.id,
                'commit_id': self.env['runbot_merge.commit'].create({
                    'sha': sha,
                    'to_check': False,
                }).id,
            } if (sha := bump_heads.get(repo)) else {
                'repository_id': repo.id,
                'commit_id': commit.id,
            })
                for repo, commit in laster
            ]
            self.env['runbot_merge.stagings'].create([{
                'state': 'success',
                'reason': 'bump freeze staging',
                'active': False,
                'target': master.id,
                'staging_batch_ids': [
                    (0, 0, {'runbot_merge_batch_id': batch.id})
                    for batch in self.bump_pr_ids.pr_id.batch_id
                ],
                'heads': bump_items,
                'commits': bump_items,
            }])

        self.env['runbot_merge.pull_requests.feedback'].create([{
            'repository': pr.repository.id,
            'pull_request': pr.number,
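The NOWAIT on the lock above makes acquisition fail immediately instead of queueing behind a conflicting transaction. A sketch of what handling that looks like from raw psycopg2 (hypothetical connection string, not the wizard's code):

import psycopg2

# hypothetical DSN, for illustration only
conn = psycopg2.connect("dbname=mergebot")
with conn, conn.cursor() as cr:
    try:
        cr.execute("LOCK runbot_merge_commit IN ACCESS EXCLUSIVE MODE NOWAIT")
        # ... create commit rows, push branches ...
    except psycopg2.errors.LockNotAvailable:
        # a conflicting lock is held: fail fast instead of blocking the freeze
        raise RuntimeError("commits table is busy, retry the freeze later")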
File diff suppressed because it is too large
@ -6,16 +6,17 @@ import json
import logging
import os
import re
from collections.abc import Mapping
from difflib import Differ
from itertools import takewhile
from operator import itemgetter
from typing import Dict, Union, Optional, Literal, Callable, Iterator, Tuple, List, TypeAlias

from werkzeug.datastructures import Headers

from odoo import api, models, fields
from odoo.tools import OrderedSet
from .pull_requests import Branch, Stagings, PullRequests, Repository, Batch
from odoo import api, models, fields, Command
from odoo.tools import OrderedSet, groupby
from .pull_requests import Branch, Stagings, PullRequests, Repository
from .batch import Batch
from .. import exceptions, utils, github, git

WAIT_FOR_VISIBILITY = [10, 10, 10, 10]
@ -56,32 +57,48 @@ def try_staging(branch: Branch) -> Optional[Stagings]:
    if branch.active_staging_id:
        return None

    rows = [
        (p, prs)
        for p, prs in ready_prs(for_branch=branch)
        if not any(prs.mapped('blocked'))
    ]
    if not rows:
    def log(label: str, batches: Batch) -> None:
        _logger.info(label, ', '.join(batches.mapped('prs.display_name')))

    alone, batches = ready_batches(for_branch=branch)

    if alone:
        log("staging high-priority PRs %s", batches)
    elif branch.project_id.staging_priority == 'default':
        if split := branch.split_ids[:1]:
            batches = split.batch_ids
            split.unlink()
            log("staging split PRs %s (prioritising splits)", batches)
        else:
            # priority, normal; priority = sorted ahead of normal, so always picked
            # first as long as there's room
            log("staging ready PRs %s (prioritising splits)", batches)
    elif branch.project_id.staging_priority == 'ready':
        if batches:
            log("staging ready PRs %s (prioritising ready)", batches)
        else:
            split = branch.split_ids[:1]
            batches = split.batch_ids
            split.unlink()
            log("staging split PRs %s (prioritising ready)", batches)
    else:
        assert branch.project_id.staging_priority == 'largest'
        maxsplit = max(branch.split_ids, key=lambda s: len(s.batch_ids), default=branch.env['runbot_merge.split'])
        _logger.info("largest split = %d, ready = %d", len(maxsplit.batch_ids), len(batches))
        # bias towards splits if len(ready) = len(batch_ids)
        if len(maxsplit.batch_ids) >= len(batches):
            batches = maxsplit.batch_ids
            maxsplit.unlink()
            log("staging split PRs %s (prioritising largest)", batches)
        else:
            log("staging ready PRs %s (prioritising largest)", batches)

    if not batches:
        return

    priority = rows[0][0]
    if priority == 0 or priority == 1:
        # p=0 take precedence over all else
        # p=1 allows merging a fix inside / ahead of a split (e.g. branch
        # is broken or widespread false positive) without having to cancel
        # the existing staging
        batched_prs = [pr_ids for _, pr_ids in takewhile(lambda r: r[0] == priority, rows)]
    elif branch.split_ids:
        split_ids = branch.split_ids[0]
        _logger.info("Found split of PRs %s, re-staging", split_ids.mapped('batch_ids.prs'))
        batched_prs = [batch.prs for batch in split_ids.batch_ids]
        split_ids.unlink()
    else: # p=2
        batched_prs = [pr_ids for _, pr_ids in takewhile(lambda r: r[0] == priority, rows)]
    original_heads, staging_state = staging_setup(branch, batches)

    original_heads, staging_state = staging_setup(branch, batched_prs)

    staged = stage_batches(branch, batched_prs, staging_state)
    staged = stage_batches(branch, batches, staging_state)

    if not staged:
        return None
@ -148,7 +165,7 @@ For-Commit-Id: {it.head}
    # create actual staging object
    st: Stagings = env['runbot_merge.stagings'].create({
        'target': branch.id,
        'batch_ids': [(4, batch.id, 0) for batch in staged],
        'staging_batch_ids': [Command.create({'runbot_merge_batch_id': batch.id}) for batch in staged],
        'heads': heads,
        'commits': commits,
    })
@ -171,36 +188,33 @@ For-Commit-Id: {it.head}
    return st


def ready_prs(for_branch: Branch) -> List[Tuple[int, PullRequests]]:
def ready_batches(for_branch: Branch) -> Tuple[bool, Batch]:
    env = for_branch.env
    # splits are ready by definition, we need to exclude them from the ready
    # rows otherwise if a prioritised (alone) PR is part of a split it'll be
    # staged through priority *and* through split.
    split_ids = for_branch.split_ids.batch_ids.ids
    env.cr.execute("""
    SELECT
        min(pr.priority) as priority,
        array_agg(pr.id) AS match
    FROM runbot_merge_pull_requests pr
    WHERE pr.target = any(%s)
    -- exclude terminal states (so there's no issue when
    -- deleting branches & reusing labels)
    AND pr.state != 'merged'
    AND pr.state != 'closed'
    GROUP BY
        pr.target,
        CASE
            WHEN pr.label SIMILAR TO '%%:patch-[[:digit:]]+'
            THEN pr.id::text
            ELSE pr.label
        END
    HAVING
        bool_or(pr.state = 'ready') or bool_or(pr.priority = 0)
    ORDER BY min(pr.priority), min(pr.id)
    """, [for_branch.ids])
    browse = env['runbot_merge.pull_requests'].browse
    return [(p, browse(ids)) for p, ids in env.cr.fetchall()]
        SELECT max(priority)
        FROM runbot_merge_batch
        WHERE blocked IS NULL AND target = %s AND NOT id = any(%s)
    """, [for_branch.id, split_ids])
    alone = env.cr.fetchone()[0] == 'alone'

    return (
        alone,
        env['runbot_merge.batch'].search([
            ('target', '=', for_branch.id),
            ('blocked', '=', False),
            ('priority', '=', 'alone') if alone else (1, '=', 1),
            ('id', 'not in', split_ids),
        ], order="priority DESC, id ASC"),
    )


def staging_setup(
    target: Branch,
    batched_prs: List[PullRequests],
    batches: Batch,
) -> Tuple[Dict[Repository, str], StagingState]:
    """Sets up the staging:

@ -208,7 +222,9 @@ def staging_setup(
    - creates tmp branch via gh API (to remove)
    - generates working copy for each repository with the target branch
    """
    all_prs: PullRequests = target.env['runbot_merge.pull_requests'].concat(*batched_prs)
    by_repo: Mapping[Repository, List[PullRequests]] = \
        dict(groupby(batches.prs, lambda p: p.repository))

    staging_state = {}
    original_heads = {}
    for repo in target.project_id.repo_ids.having_branch(target):
@ -224,7 +240,7 @@ def staging_setup(
            # be hooked only to "proper" remote-tracking branches
            # (in `refs/remotes`), it doesn't seem to work here
            f'+refs/heads/{target.name}:refs/heads/{target.name}',
            *(pr.head for pr in all_prs if pr.repository == repo)
            *(pr.head for pr in by_repo.get(repo, []))
        )
        original_heads[repo] = head
        staging_state[repo] = StagingSlice(gh=gh, head=head, repo=source.stdout().with_config(text=True, check=False))
@ -232,14 +248,13 @@ def staging_setup(
    return original_heads, staging_state


def stage_batches(branch: Branch, batched_prs: List[PullRequests], staging_state: StagingState) -> Stagings:
def stage_batches(branch: Branch, batches: Batch, staging_state: StagingState) -> Stagings:
    batch_limit = branch.project_id.batch_limit
    env = branch.env
    staged = env['runbot_merge.batch']
    for batch in batched_prs:
    for batch in batches:
        if len(staged) >= batch_limit:
            break

        try:
            staged |= stage_batch(env, batch, staging_state)
        except exceptions.MergeError as e:
@ -290,16 +305,18 @@ def parse_refs_smart(read: Callable[[int], bytes]) -> Iterator[Tuple[str, str]]:
UNCHECKABLE = ['merge_method', 'overrides', 'draft']


def stage_batch(env: api.Environment, prs: PullRequests, staging: StagingState) -> Batch:
def stage_batch(env: api.Environment, batch: Batch, staging: StagingState):
    """Stages the batch represented by the ``prs`` recordset, onto the
    current corresponding staging heads.

    Alongside returning the newly created batch, updates ``staging[*].head``
    in-place on success. On failure, the heads should not be touched.

    May return an empty recordset on some non-fatal failures.
    """
    new_heads: Dict[PullRequests, str] = {}
    pr_fields = env['runbot_merge.pull_requests']._fields
    for pr in prs:
    for pr in batch.prs:
        info = staging[pr.repository]
        _logger.info(
            "Staging pr %s for target %s; method=%s",
@ -308,7 +325,7 @@ def stage_batch(env: api.Environment, prs: PullRequests, staging: StagingState)
        )

        try:
            method, new_heads[pr] = stage(pr, info, related_prs=(prs - pr))
            method, new_heads[pr] = stage(pr, info, related_prs=(batch.prs - pr))
            _logger.info(
                "Staged pr %s to %s by %s: %s -> %s",
                pr.display_name, pr.target.name, method,
@ -337,10 +354,7 @@ def stage_batch(env: api.Environment, prs: PullRequests, staging: StagingState)
    # update meta to new heads
    for pr, head in new_heads.items():
        staging[pr.repository].head = head
    return env['runbot_merge.batch'].create({
        'target': prs[0].target.id,
        'prs': [(4, pr.id, 0) for pr in prs],
    })
    return batch

def format_for_difflib(items: Iterator[Tuple[str, object]]) -> Iterator[str]:
    """ Bit of a pain in the ass because difflib really wants
@ -408,7 +422,7 @@ def stage(pr: PullRequests, info: StagingSlice, related_prs: PullRequests) -> Tu
        diff.append(('Message', pr.message, msg))

    if invalid:
        pr.write({**invalid, 'state': 'opened', 'head': pr_head})
        pr.write({**invalid, 'reviewed_by': False, 'head': pr_head})
        raise exceptions.Mismatch(invalid, diff)

    if pr.reviewed_by and pr.reviewed_by.name == pr.reviewed_by.github_login:
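One subtlety in ready_batches above: "ORDER BY priority DESC" works because _auto_init creates the priority column as a Postgres enum whose declaration order is default < priority < alone, so alone batches sort first. A sketch of the equivalent ordering in plain Python, with hypothetical batch ids:

from enum import IntEnum

class Priority(IntEnum):  # mirrors the PG enum's declaration order
    DEFAULT = 0
    PRIORITY = 1
    ALONE = 2

# hypothetical (batch id, priority) pairs for ready batches
ready = [(3, Priority.DEFAULT), (1, Priority.ALONE), (2, Priority.PRIORITY)]
ready.sort(key=lambda b: (-b[1], b[0]))  # priority DESC, id ASC
assert [bid for bid, _ in ready] == [1, 2, 3]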
runbot_merge/models/utils.py (new file, 6 lines)
@ -0,0 +1,6 @@
from typing import Tuple


def enum(model: str, field: str) -> Tuple[str, str]:
    n = f'{model.replace(".", "_")}_{field}_type'
    return n, n
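For reference, a quick check of what the helper returns; the same name serves as both elements of the field's column_type and is what _auto_init uses to create the Postgres enum type:

assert enum('runbot_merge.batch', 'priority') == (
    'runbot_merge_batch_priority_type',
    'runbot_merge_batch_priority_type',
)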
@ -16,6 +16,7 @@ access_runbot_merge_stagings_commits_admin,Admin access to staging commits,model
access_runbot_merge_stagings_cancel_admin,Admin access to cancelling stagings,model_runbot_merge_stagings_cancel,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_split_admin,Admin access to splits,model_runbot_merge_split,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_batch_admin,Admin access to batches,model_runbot_merge_batch,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_staging_batch_admin,Admin access to batch/staging link,model_runbot_merge_staging_batch,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_fetch_job_admin,Admin access to fetch jobs,model_runbot_merge_fetch_job,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_pull_requests_feedback_admin,Admin access to feedback,model_runbot_merge_pull_requests_feedback,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_review_rights,Admin access to review permissions,model_res_partner_review,runbot_merge.group_admin,1,1,1,1
@ -14,27 +14,30 @@ h1, h2, h3, h4, h5, h6{
  margin-bottom: 0.33em;
}
h5 { font-size: 1em; }
.bg-success, .bg-info, .bg-warning, .bg-danger, .bg-gray-lighter {
.bg-success, .bg-info, .bg-warning, .bg-danger, .bg-gray-lighter,
.table-success, .table-info, .table-warning, .table-danger {
  color: inherit;
}
.dropdown-item, .dropdown-menu, .dropdown-menu a {
  color: inherit;
}
.bg-success {
  background-color: #dff0d8 !important;

$mergebot-colors: ("success": #dff0d8, "danger": #f2dede, "warning": #fcf8e3, "info": #d9edf7);
@each $category, $color in $mergebot-colors {
  .bg-#{$category} {
    background-color: $color !important;
  }
  .table-#{$category} {
    background-color: $color !important;
    &.table-active {
      background-color: scale-color($color, $lightness: -5%) !important;
    }
  }
}
.bg-unmerged {
  background-color: #dcefe8 !important
}
.bg-info {
  background-color: #d9edf7 !important;
}
.bg-warning {
  background-color: #fcf8e3 !important;
}
.bg-danger {
  background-color: #f2dede !important;
  background-color: #f8f0e3 !important
}

.list-inline {
  margin-bottom: 10px;
}
@ -121,3 +124,16 @@ dl.runbot-merge-fields {
  // works better for the left edge of the *box*
  @extend .border-left;
}

// batches sequence table in PR dashboard: mostly uses (customised) bootstrap
// but some of the style is bespoke because inline styles don't work well with
// CSP
.closed {
  text-decoration: line-through;
}
tr.inactive {
  opacity: 0.5;
}
td.detached {
  border-top: 2px solid map-get($theme-colors, "danger");
}
@ -10,7 +10,7 @@ import requests
from lxml import html

import odoo
from utils import _simple_init, seen, re_matches, get_partner, Commit, pr_page, to_pr, part_of
from utils import _simple_init, seen, re_matches, get_partner, Commit, pr_page, to_pr, part_of, ensure_one


@pytest.fixture
@ -53,7 +53,6 @@ def test_trivial_flow(env, repo, page, users, config):
    )) == {
        'label': f"{config['github']['owner']}:other",
        'head': c1,
        'target': 'master',
    }

    with repo:
@ -121,6 +120,35 @@ def test_trivial_flow(env, repo, page, users, config):
        "\n\nSigned-off-by: {reviewer.formatted_email}"\
        .format(repo=repo, reviewer=get_partner(env, users['reviewer']))

    # reverse because the messages are in newest-to-oldest by default
    # (as that's how you want to read them)
    messages = reversed([
        (m.author_id.display_name, m.body, list(zip(
            m.tracking_value_ids.get_old_display_value(),
            m.tracking_value_ids.get_new_display_value(),
        )))
        for m in pr_id.message_ids
    ])

    assert list(messages) == [
        ('OdooBot', '<p>Pull Request created</p>', []),
        ('OdooBot', f'<p>statuses changed on {c1}</p>', [('Opened', 'Validated')]),
        # reviewer approved, changing the state and setting the reviewer as
        # reviewer, plus setting the merge method
        ('Reviewer', '', [
            ('Validated', 'Ready'),
            ('', 'rebase and merge, using the PR as merge commit message'),
            ('', 'Reviewer'),
        ]),
        # staging succeeded
        (re_matches(r'.*'), f'<p>staging {st.id} succeeded</p>', [
            # set merge date
            (False, pr_id.merge_date + 'Z'),
            # updated state
            ('Ready', 'Merged'),
        ]),
    ]

class TestCommitMessage:
    def test_commit_simple(self, env, repo, users, config):
        """ verify 'closes ...' is correctly added in the commit message
@ -744,6 +772,7 @@ class TestPREdition:
        with repo: prx.base = '1.0'
        assert pr.target == branch_1
        assert not pr.staging_id, "updating the base of a staged PR should have unstaged it"
        assert st.state == 'cancelled', f"expected cancellation, got {st.state}"
        assert st.reason == f"{pr.display_name} target (base) branch was changed from 'master' to '1.0'"

        with repo: prx.base = '2.0'
@ -756,9 +785,17 @@ class TestPREdition:
            ('number', '=', prx.number)
        ]).target == branch_1

    def test_retarget_update_commits(self, env, repo):
        """ Retargeting a PR should update its commits count
    def test_retarget_update_commits(self, env, project, repo):
        """ Retargeting a PR should update its commits count, as well as follow
        the new target's requirements
        """
        project.repo_ids.write({
            'status_ids': [
                (5, 0, 0),
                (0, 0, {'context': 'a', 'branch_filter': [('name', '=', 'master')]}),
                (0, 0, {'context': 'b', 'branch_filter': [('name', '!=', 'master')]}),
            ]
        })
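        # N.B. on the x2many command tuples used above (standard Odoo ORM
        # behaviour, not mergebot-specific): (5, 0, 0) removes all existing
        # status_ids from the set, and each (0, 0, {...}) creates a new
        # status, so the repositories end up with exactly the two contexts
        # defined here: 'a' required on master only, 'b' everywhere else.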
        branch_1 = env['runbot_merge.branch'].create({
            'name': '1.0',
            'project_id': env['runbot_merge.project'].search([]).id,
@ -767,29 +804,35 @@ class TestPREdition:

        with repo:
            # master is 1 commit ahead of 1.0
            m = repo.make_commit(None, 'initial', None, tree={'m': 'm'})
            repo.make_ref('heads/1.0', m)
            m2 = repo.make_commit(m, 'second', None, tree={'m': 'm2'})
            repo.make_ref('heads/master', m2)
            [m] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), ref='heads/1.0')
            [m2] = repo.make_commits(m, Commit('second', tree={'m': 'm2'}), ref='heads/master')

            # the PR builds on master, but is erroneously targeted to 1.0
            c = repo.make_commit(m2, 'first', None, tree={'m': 'm3'})
            prx = repo.make_pr(title='title', body='body', target='1.0', head=c)
            repo.make_commits(m2, Commit('first', tree={'m': 'm3'}), ref='heads/abranch')
            prx = repo.make_pr(title='title', body='body', target='1.0', head='abranch')
            repo.post_status('heads/abranch', 'success', 'a')
        env.run_crons()
        pr = env['runbot_merge.pull_requests'].search([
            ('repository.name', '=', repo.name),
            ('number', '=', prx.number)
        ])
        assert not pr.squash
        assert pr.status == 'pending'
        assert pr.state == 'opened'

        with repo:
            prx.base = 'master'
        assert pr.target == master
        assert pr.squash
        assert pr.status == 'success'
        assert pr.state == 'validated'

        with repo:
            prx.base = '1.0'
        assert pr.target == branch_1
        assert not pr.squash
        assert pr.status == 'pending'
        assert pr.state == 'opened'
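        # A sketch of why the asserts flip (reading of the setup above, not
        # part of the diff): the PR head builds on master, which is one
        # commit ahead of 1.0, so the PR counts one commit against master
        # (squash) but two against 1.0 (no squash); and since status 'a' is
        # only required on master, the PR is 'success' there but 'pending'
        # against 1.0, where 'b' is required and never posted.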

        # check if things also work right when modifying the PR then
        # retargeting (don't see why not but...)
@ -845,6 +888,7 @@ def test_close_staged(env, repo, config, page):
        ('number', '=', prx.number),
    ])
    env.run_crons()
    assert pr.reviewed_by
    assert pr.state == 'ready'
    assert pr.staging_id

@ -856,6 +900,18 @@ def test_close_staged(env, repo, config, page):
    assert not env['runbot_merge.stagings'].search([])
    assert pr.state == 'closed'
    assert pr_page(page, prx).cssselect('.alert-light')
    assert not pr.reviewed_by

    with repo:
        prx.open()
    assert pr.state == 'validated'
    assert not pr.reviewed_by

    with repo:
        prx.post_comment('hansen r+', config['role_reviewer']['token'])
    assert pr.reviewed_by
    pr.write({'closed': True})
    assert not pr.reviewed_by

def test_forward_port(env, repo, config):
    with repo:
@ -1222,7 +1278,7 @@ class TestRetry:
            (users['reviewer'], 'hansen r+'),
            (users['reviewer'], 'hansen retry'),
            seen(env, prx, users),
            (users['user'], "I'm sorry, @{reviewer}: retry makes no sense when the PR is not in error.".format_map(users)),
            (users['user'], "@{reviewer} retry makes no sense when the PR is not in error.".format_map(users)),
        ]

    @pytest.mark.parametrize('disabler', ['user', 'other', 'reviewer'])
@ -2130,23 +2186,28 @@ class TestPRUpdate(object):
            repo.update_ref(prx.ref, c2, force=True)
        assert pr.head == c2

    def test_reopen_update(self, env, repo):
    def test_reopen_update(self, env, repo, config):
        with repo:
            m = repo.make_commit(None, 'initial', None, tree={'m': 'm'})
            repo.make_ref('heads/master', m)

            c = repo.make_commit(m, 'first', None, tree={'m': 'c1'})
            prx = repo.make_pr(title='title', body='body', target='master', head=c)
            prx.post_comment("hansen r+", config['role_reviewer']['token'])

        pr = to_pr(env, prx)
        assert pr.state == 'approved'
        assert pr.reviewed_by
        with repo:
            prx.close()
        assert pr.state == 'closed'
        assert pr.head == c
        assert not pr.reviewed_by

        with repo:
            prx.open()
        assert pr.state == 'opened'
        assert not pr.reviewed_by

        with repo:
            c2 = repo.make_commit(c, 'first', None, tree={'m': 'cc'})
@ -2393,6 +2454,7 @@ class TestPRUpdate(object):
        env.run_crons('runbot_merge.process_updated_commits')
        assert pr_id.message == 'title\n\nbody'
        assert pr_id.state == 'ready'
        old_reviewer = pr_id.reviewed_by

        # TODO: find way to somehow skip / ignore the update_ref?
        with repo:
@ -2413,10 +2475,12 @@ class TestPRUpdate(object):
        # in a "ready" state
        pr_id.write({
            'head': c,
            'state': 'ready',
            'reviewed_by': old_reviewer.id,
            'message': "Something else",
            'target': other.id,
        })
        assert pr_id.head == c
        assert pr_id.state == "ready"

        env.run_crons()

@ -2425,8 +2489,8 @@ class TestPRUpdate(object):
        assert pr_id.head == c2
        assert pr_id.message == 'title\n\nbody'
        assert pr_id.target.name == 'master'
        assert pr.comments[-1]['body'] == """\
@{} @{} we apparently missed updates to this PR and tried to stage it in a state \
        assert pr.comments[-1]['body'] == f"""\
@{users['user']} we apparently missed updates to this PR and tried to stage it in a state \
which might not have been approved.

The properties Head, Target, Message were not correctly synchronized and have been updated.
@ -2435,8 +2499,8 @@ The properties Head, Target, Message were not correctly synchronized and have be

```diff
Head:
- {}
+ {}
- {c}
+ {c2}

Target branch:
- somethingelse
@ -2454,7 +2518,7 @@ The properties Head, Target, Message were not correctly synchronized and have be
Note that we are unable to check the properties Merge Method, Overrides, Draft.

Please check and re-approve.
""".format(users['user'], users['reviewer'], c, c2)
"""

        # if the head commit doesn't change, that part should still be valid
        with repo:
@ -2465,8 +2529,8 @@ Please check and re-approve.

        assert pr_id.message == 'title\n\nbody'
        assert pr_id.state == 'validated'
        assert pr.comments[-1]['body'] == """\
@{} @{} we apparently missed updates to this PR and tried to stage it in a state \
        assert pr.comments[-1]['body'] == f"""\
@{users['user']} we apparently missed updates to this PR and tried to stage it in a state \
which might not have been approved.

The properties Message were not correctly synchronized and have been updated.
@ -2486,11 +2550,11 @@ The properties Message were not correctly synchronized and have been updated.
Note that we are unable to check the properties Merge Method, Overrides, Draft.

Please check and re-approve.
""".format(users['user'], users['reviewer'])
"""

        pr_id.write({
            'head': c,
            'state': 'ready',
            'reviewed_by': old_reviewer.id,
            'message': "Something else",
            'target': other.id,
            'draft': True,
@ -2695,6 +2759,9 @@ class TestBatching(object):
    def test_batching_pressing(self, env, repo, config):
        """ "Pressing" PRs should be selected before normal PRs and batched
        together
        """
        # by limiting the batch size to 3 we allow both high-priority PRs,
        # plus a single normal-priority one
        env['runbot_merge.project'].search([]).batch_limit = 3
        with repo:
            m = repo.make_commit(None, 'initial', None, tree={'a': 'some content'})
            repo.make_ref('heads/master', m)
@ -2704,51 +2771,56 @@ class TestBatching(object):

        pr11 = self._pr(repo, 'Pressing1', [{'x': 'x'}, {'y': 'y'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'])
        pr12 = self._pr(repo, 'Pressing2', [{'z': 'z'}, {'zz': 'zz'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'])
        pr11.post_comment('hansen priority=1', config['role_reviewer']['token'])
        pr12.post_comment('hansen priority=1', config['role_reviewer']['token'])

        pr21, pr22, pr11, pr12 = prs = [to_pr(env, pr) for pr in [pr21, pr22, pr11, pr12]]
        assert pr21.priority == pr22.priority == 2
        assert pr11.priority == pr12.priority == 1

        pr11.post_comment('hansen priority', config['role_reviewer']['token'])
        pr12.post_comment('hansen priority', config['role_reviewer']['token'])
        # necessary to project commit statuses onto PRs
        env.run_crons()

        pr21, pr22, pr11, pr12 = prs = [to_pr(env, pr) for pr in [pr21, pr22, pr11, pr12]]
        assert pr11.priority == pr12.priority == 'priority'
        assert pr21.priority == pr22.priority == 'default'
        assert all(pr.state == 'ready' for pr in prs)
        assert not pr21.staging_id

        staging = ensure_one(env['runbot_merge.stagings'].search([]))
        assert staging.pr_ids == pr11 | pr12 | pr21
        assert list(staging.batch_ids) == [
            pr11.batch_id,
            pr12.batch_id,
            pr21.batch_id,
        ]
        assert not pr22.staging_id
        assert pr11.staging_id
        assert pr12.staging_id
        assert pr11.staging_id == pr12.staging_id
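        # Command-set note (an editorial sketch, inferred from the old/new
        # pairs in this diff): the v1 numeric priorities map onto the v2
        # keywords roughly as p=2 -> 'default' and p=1 -> 'priority', while
        # the old p=0 bundle is split into the orthogonal 'alone',
        # 'skipchecks' and cancel-staging behaviours exercised below.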

    def test_batching_urgent(self, env, repo, config):
        with repo:
            m = repo.make_commit(None, 'initial', None, tree={'a': 'some content'})
            repo.make_ref('heads/master', m)

        pr21 = self._pr(repo, 'PR1', [{'a': 'AAA'}, {'b': 'BBB'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'])
        pr22 = self._pr(repo, 'PR2', [{'c': 'CCC'}, {'d': 'DDD'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'])

        pr11 = self._pr(repo, 'Pressing1', [{'x': 'x'}, {'y': 'y'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'])
        pr12 = self._pr(repo, 'Pressing2', [{'z': 'z'}, {'zz': 'zz'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'])
        pr11.post_comment('hansen priority=1', config['role_reviewer']['token'])
        pr12.post_comment('hansen priority=1', config['role_reviewer']['token'])
        pr11.post_comment('hansen NOW', config['role_reviewer']['token'])
        pr12.post_comment('hansen NOW', config['role_reviewer']['token'])

        # stage PR1
        # stage current PRs
        env.run_crons()
        p_11, p_12, p_21, p_22 = \
            [to_pr(env, pr) for pr in [pr11, pr12, pr21, pr22]]
        assert not p_21.staging_id or p_22.staging_id
        assert p_11.staging_id and p_12.staging_id
        assert p_11.staging_id == p_12.staging_id
        staging_1 = p_11.staging_id
        p_11, p_12 = \
            [to_pr(env, pr) for pr in [pr11, pr12]]
        sm_all = p_11 | p_12
        staging_1 = sm_all.staging_id
        assert staging_1
        assert len(staging_1) == 1
        assert list(staging_1.batch_ids) == [
            p_11.batch_id,
            p_12.batch_id,
        ]

        # no statuses run on PR0s
        with repo:
            pr01 = self._pr(repo, 'Urgent1', [{'n': 'n'}, {'o': 'o'}], user=config['role_user']['token'], reviewer=None, statuses=[])
            pr01.post_comment('hansen priority=0 rebase-merge', config['role_reviewer']['token'])
            pr01.post_comment('hansen NOW! rebase-merge', config['role_reviewer']['token'])
        p_01 = to_pr(env, pr01)
        assert p_01.state == 'opened'
        assert p_01.priority == 0
        assert p_01.state == 'ready'
        assert p_01.priority == 'alone'
        assert p_01.skipchecks == True

        env.run_crons()
        # first staging should be cancelled and PR0 should be staged
@ -2756,9 +2828,89 @@ class TestBatching(object):
        assert not staging_1.active
        assert not p_11.staging_id and not p_12.staging_id
        assert p_01.staging_id
        assert p_11.state == 'ready'
        assert p_12.state == 'ready'

        # make the staging fail
        with repo:
            repo.post_status('staging.master', 'failure', 'ci/runbot')
        env.run_crons()
        assert p_01.error
        assert p_01.batch_id.blocked
        assert p_01.blocked

        assert p_01.state == 'error'
        assert not p_01.staging_id.active
        staging_2 = ensure_one(sm_all.staging_id)
        assert staging_2 != staging_1

        with repo:
            pr01.post_comment('hansen retry', config['role_reviewer']['token'])
        env.run_crons()
        # retry should have re-triggered cancel-staging
        assert not staging_2.active
        assert p_01.staging_id.active

        # make the staging fail again
        with repo:
            repo.post_status('staging.master', 'failure', 'ci/runbot')
        env.run_crons()

        assert not p_01.staging_id.active
        assert p_01.state == 'error'
        staging_3 = ensure_one(sm_all.staging_id)
        assert staging_3 != staging_2

        # check that updating the PR resets it to ~ready
        with repo:
            repo.make_commits(
                'heads/master',
                Commit("urgent+", tree={'y': 'es'}),
                ref="heads/Urgent1",
            )
        env.run_crons()
        assert not staging_3.active
        assert p_01.state == 'ready'
        assert p_01.priority == 'alone'
        assert p_01.skipchecks == True
        assert p_01.staging_id.active

        # r- should unstage, re-enable the checks and switch off staging
        # cancellation, but leave the priority
        with repo:
            pr01.post_comment("hansen r-", config['role_reviewer']['token'])
        env.run_crons()

        staging_4 = ensure_one(sm_all.staging_id)
        assert staging_4 != staging_3

        assert not p_01.staging_id.active
        assert p_01.state == 'opened'
        assert p_01.priority == 'alone'
        assert p_01.skipchecks == False
        assert p_01.cancel_staging == True

        assert staging_4.active, "staging should not be disabled"

        # cause the PR to become ready the normal way
        with repo:
            pr01.post_comment("hansen r+", config['role_reviewer']['token'])
            repo.post_status(p_01.head, 'success', 'legal/cla')
            repo.post_status(p_01.head, 'success', 'ci/runbot')
        env.run_crons()

        # a cancel_staging pr becoming ready should have cancelled the staging,
        # and because the PR is `alone` it should... have been restaged alone,
        # without the ready non-alone PRs
        assert not sm_all.staging_id.active
        assert p_01.staging_id.active
        assert p_01.state == 'ready'
        assert p_01.priority == 'alone'
        assert p_01.skipchecks == False
        assert p_01.cancel_staging == True

    def test_batching_urgenter_than_split(self, env, repo, config):
        """ p=0 PRs should take priority over split stagings (processing
        """ p=alone PRs should take priority over split stagings (processing
        of a staging having CI-failed and being split into sub-stagings)
        """
        with repo:
@ -2789,7 +2941,7 @@ class TestBatching(object):
        # during restaging of pr1, create urgent PR
        with repo:
            pr0 = self._pr(repo, 'urgent', [{'a': 'a', 'b': 'b'}], user=config['role_user']['token'], reviewer=None, statuses=[])
            pr0.post_comment('hansen priority=0', config['role_reviewer']['token'])
            pr0.post_comment('hansen NOW!', config['role_reviewer']['token'])
        env.run_crons()

        # TODO: maybe just deactivate stagings instead of deleting them when canceling?
@ -2810,7 +2962,7 @@ class TestBatching(object):
        # no statuses run on PR0s
        with repo:
            pr01 = self._pr(repo, 'Urgent1', [{'n': 'n'}, {'o': 'o'}], user=config['role_user']['token'], reviewer=None, statuses=[])
            pr01.post_comment('hansen priority=0', config['role_reviewer']['token'])
            pr01.post_comment('hansen NOW!', config['role_reviewer']['token'])
        p_01 = to_pr(env, pr01)
        p_01.state = 'error'

@ -2818,6 +2970,50 @@ class TestBatching(object):
        assert not p_01.staging_id, "p_01 should not be picked up as it's failed"
        assert p_21.staging_id, "p_21 should have been staged"

    def test_urgent_split(self, env, repo, config):
        """Ensure that urgent (alone) PRs which get split don't get
        double-merged
        """
        with repo:
            repo.make_commits(
                None,
                Commit("initial", tree={'a': '1'}),
                ref="heads/master"
            )

            pr01 = self._pr(
                repo, "PR1", [{'b': '1'}],
                user=config['role_user']['token'],
                reviewer=None,
            )
            pr01.post_comment('hansen alone r+', config['role_reviewer']['token'])
            pr02 = self._pr(
                repo, "PR2", [{'c': '1'}],
                user=config['role_user']['token'],
                reviewer=None,
            )
            pr02.post_comment('hansen alone r+', config['role_reviewer']['token'])
        env.run_crons('runbot_merge.process_updated_commits')
        pr01_id = to_pr(env, pr01)
        assert pr01_id.blocked is False
        pr02_id = to_pr(env, pr02)
        assert pr02_id.blocked is False

        env.run_crons()
        st = pr01_id.staging_id
        assert st and pr02_id.staging_id == st
        with repo:
            repo.post_status('staging.master', 'failure', 'ci/runbot')
        env.run_crons()
        # should have cancelled the staging, split it, and re-staged the first
        # half of the split
        assert st.state == 'failure'
        assert pr01_id.staging_id and pr01_id.staging_id != st
        assert not pr02_id.staging_id
        split_prs = env['runbot_merge.split'].search([]).batch_ids.prs
        assert split_prs == pr02_id, \
            f"only the unstaged PR {pr02_id} should be in a split, found {split_prs}"
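        # Split mechanics as exercised above (a reading of this test, not a
        # spec): when a multi-batch staging fails CI it is cancelled and
        # bisected; the first half is restaged immediately while the
        # remainder is parked in runbot_merge.split records until it can be
        # staged in turn.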

    @pytest.mark.skip(reason="Maybe nothing to do, the PR is just skipped and put in error?")
    def test_batching_merge_failure(self):
        pass
@ -2871,7 +3067,7 @@ class TestBatching(object):
        env.run_crons('runbot_merge.process_updated_commits', 'runbot_merge.merge_cron', 'runbot_merge.staging_cron')
        assert pr2.state == 'merged'

class TestReviewing(object):
class TestReviewing:
    def test_reviewer_rights(self, env, repo, users, config):
        """Only users with review rights will have their r+ (and other
        attributes) taken into account
@ -2909,7 +3105,7 @@ class TestReviewing(object):
            (users['user'], "I'm sorry, @{}. I'm afraid I can't do that.".format(users['other'])),
            (users['reviewer'], 'hansen r+'),
            (users['reviewer'], 'hansen r+'),
            (users['user'], "I'm sorry, @{}: this PR is already reviewed, reviewing it again is useless.".format(
            (users['user'], "@{} this PR is already reviewed, reviewing it again is useless.".format(
                users['reviewer'])),
        ]

@ -2937,7 +3133,7 @@ class TestReviewing(object):
        assert prx.comments == [
            (users['reviewer'], 'hansen r+'),
            seen(env, prx, users),
            (users['user'], "I'm sorry, @{}: you can't review+.".format(users['reviewer'])),
            (users['user'], "@{} you can't review+. Skill issue.".format(users['reviewer'])),
        ]

    def test_self_review_success(self, env, repo, users, config):
@ -3048,23 +3244,23 @@ class TestReviewing(object):
        ])

        with repo:
            prx.post_review('COMMENT', "hansen priority=1", config['role_reviewer']['token'])
        assert pr.priority == 1
            prx.post_review('COMMENT', "hansen priority", config['role_reviewer']['token'])
        assert pr.priority == 'priority'
        assert pr.state == 'opened'

        with repo:
            prx.post_review('APPROVE', "hansen priority=2", config['role_reviewer']['token'])
        assert pr.priority == 2
            prx.post_review('APPROVE', "hansen default", config['role_reviewer']['token'])
        assert pr.priority == 'default'
        assert pr.state == 'opened'

        with repo:
            prx.post_review('REQUEST_CHANGES', 'hansen priority=1', config['role_reviewer']['token'])
        assert pr.priority == 1
            prx.post_review('REQUEST_CHANGES', 'hansen priority', config['role_reviewer']['token'])
        assert pr.priority == 'priority'
        assert pr.state == 'opened'

        with repo:
            prx.post_review('COMMENT', 'hansen r+', config['role_reviewer']['token'])
        assert pr.priority == 1
        assert pr.priority == 'priority'
        assert pr.state == 'approved'
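        # As shown above, commands are accepted from review bodies of any
        # type (COMMENT, APPROVE, REQUEST_CHANGES), not only from plain
        # comments, and the keyword priority persists across later commands.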

    def test_no_email(self, env, repo, users, config, partners):
@ -3092,7 +3288,7 @@ class TestReviewing(object):
            seen(env, pr, users),
            (users['reviewer'], 'hansen delegate+'),
            (users['user'], 'hansen r+'),
            (users['user'], f"I'm sorry, @{users['user']}: I must know your email before you can review PRs. Please contact an administrator."),
            (users['user'], f"@{users['user']} I must know your email before you can review PRs. Please contact an administrator."),
        ]
        user_partner.fetch_github_email()
        assert user_partner.email
@ -3101,6 +3297,28 @@ class TestReviewing(object):
        env.run_crons()
        assert to_pr(env, pr).state == 'approved'

    def test_skipchecks(self, env, repo, users, config):
        """Skipcheck makes the PR immediately ready (if it's not in error or
        something)
        """
        with repo:
            [m, _] = repo.make_commits(
                None,
                Commit("initial", tree={'m': 'm'}),
                Commit("second", tree={"m2": "m2"}),
                ref="heads/master"
            )

            [c1] = repo.make_commits(m, Commit('first', tree={'m': 'c1'}))
            pr = repo.make_pr(title='title', target='master', head=c1)
            pr.post_comment('hansen skipchecks', config['role_reviewer']['token'])
        env.run_crons()

        pr_id = to_pr(env, pr)
        # assert pr_id.state == 'ready'
        assert not pr_id.blocked
        # since the pr is not blocked it should have been staged by the relevant cron
        assert pr_id.staging_id
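        # Note (a reading of this test, not asserted above): no explicit r+
        # nor any CI status is ever posted, so `skipchecks` on its own
        # appears to lift both requirements, which is why the staging cron
        # picks the PR up immediately.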

class TestUnknownPR:
    """ Sync PRs initially looked excellent but aside from the v4 API not
@ -3157,7 +3375,7 @@ class TestUnknownPR:
            (users['reviewer'], 'hansen r+'),
            (users['reviewer'], 'hansen r+'),
            seen(env, prx, users),
            (users['user'], f"@{users['user']} @{users['reviewer']} I didn't know about this PR and had to "
            (users['user'], f"@{users['user']} I didn't know about this PR and had to "
                            "retrieve its information, you may have to "
                            "re-approve it as I didn't see previous commands."),
        ]
@ -3213,7 +3431,7 @@ class TestUnknownPR:
            # reviewer is set because fetch replays all the comments (thus
            # setting r+ and reviewer) but then syncs the head commit thus
            # unsetting r+ but leaving the reviewer
            (users['user'], f"@{users['user']} @{users['reviewer']} I didn't know about this PR and had to retrieve "
            (users['user'], f"@{users['user']} I didn't know about this PR and had to retrieve "
                            "its information, you may have to re-approve it "
                            "as I didn't see previous commands."),
        ]
@ -3394,6 +3612,8 @@ class TestRecognizeCommands:
            (users['reviewer'], "hansen do the thing"),
            (users['reviewer'], "hansen @bobby-b r+ :+1:"),
            seen(env, pr, users),
            (users['user'], "@{reviewer} unknown command 'do'".format_map(users)),
            (users['user'], "@{reviewer} unknown command '@bobby-b'".format_map(users)),
        ]

class TestRMinus:
@ -3574,41 +3794,6 @@ class TestRMinus:
        assert pr2.state == 'validated', "state should have been reset"
        assert not env['runbot_merge.split'].search([]), "there should be no split left"

    def test_rminus_p0(self, env, repo, config, users):
        """ In and of itself r- doesn't do anything on p=0 since they bypass
        approval, so unstage and downgrade to p=1.
        """

        with repo:
            m = repo.make_commit(None, 'initial', None, tree={'m': 'm'})
            repo.make_ref('heads/master', m)

            c = repo.make_commit(m, 'first', None, tree={'m': 'c'})
            prx = repo.make_pr(title='title', body=None, target='master', head=c)
            repo.post_status(prx.head, 'success', 'ci/runbot')
            repo.post_status(prx.head, 'success', 'legal/cla')
            prx.post_comment('hansen p=0', config['role_reviewer']['token'])
        env.run_crons()

        pr = env['runbot_merge.pull_requests'].search([
            ('repository.name', '=', repo.name),
            ('number', '=', prx.number),
        ])
        assert pr.priority == 0
        assert pr.staging_id

        with repo:
            prx.post_comment('hansen r-', config['role_reviewer']['token'])
        env.run_crons()
        assert not pr.staging_id, "pr should have been unstaged"
        assert pr.priority == 1, "priority should have been downgraded"
        assert prx.comments == [
            (users['reviewer'], 'hansen p=0'),
            seen(env, prx, users),
            (users['reviewer'], 'hansen r-'),
            (users['user'], "PR priority reset to 1, as pull requests with priority 0 ignore review state."),
        ]

class TestComments:
    def test_address_method(self, repo, env, config):
        with repo:

104 runbot_merge/tests/test_batch_consistency.py Normal file
@ -0,0 +1,104 @@
"""This module tests edge cases specific to the batch objects themselves,
without wider relevance and thus no better location.
"""
from utils import Commit, to_pr


def test_close_single(env, project, make_repo, setreviewers):
    """If a batch has a single PR and that PR gets closed, the batch should be
    inactive *and* blocked.
    """
    repo = make_repo('wheee')
    r = env['runbot_merge.repository'].create({
        'project_id': project.id,
        'name': repo.name,
        'required_statuses': 'default',
        'group_id': False,
    })
    setreviewers(r)

    with repo:
        repo.make_commits(None, Commit("a", tree={"a": "a"}), ref='heads/master')
        [c] = repo.make_commits('master', Commit('b', tree={"b": "b"}))
        pr = repo.make_pr(head=c, target='master')
    env.run_crons()

    pr_id = to_pr(env, pr)
    batch_id = pr_id.batch_id
    assert pr_id.state == 'opened'
    assert batch_id.blocked
    Batches = env['runbot_merge.batch']
    assert Batches.search_count([]) == 1

    with repo:
        pr.close()

    assert pr_id.state == 'closed'
    assert batch_id.all_prs == pr_id
    assert batch_id.prs == pr_id.browse(())
    assert batch_id.blocked == "all prs are closed"
    assert not batch_id.active

    assert Batches.search_count([]) == 0
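    # The count dropping to 0 relies on standard Odoo `active` semantics (an
    # assumption, not asserted here): records with active=False are filtered
    # out of search() by default, so
    # Batches.with_context(active_test=False).search_count([]) would still
    # find the closed batch.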

def test_close_multiple(env, project, make_repo, setreviewers):
    """If a batch has multiple PRs and they get closed one by one, the batch
    should remain active until the last PR is closed, then become inactive.
    """
    Batches = env['runbot_merge.batch']
    repo1 = make_repo('wheee')
    repo2 = make_repo('wheeee')
    project.write({
        'repo_ids': [(0, 0, {
            'name': repo1.name,
            'required_statuses': 'default',
            'group_id': False,
        }), (0, 0, {
            'name': repo2.name,
            'required_statuses': 'default',
            'group_id': False,
        })]
    })
    setreviewers(*project.repo_ids)

    with repo1:
        repo1.make_commits(None, Commit("a", tree={"a": "a"}), ref='heads/master')
        repo1.make_commits('master', Commit('b', tree={"b": "b"}), ref='heads/a_pr')
        pr1 = repo1.make_pr(head='a_pr', target='master')

    with repo2:
        repo2.make_commits(None, Commit("a", tree={"a": "a"}), ref='heads/master')
        repo2.make_commits('master', Commit('b', tree={"b": "b"}), ref='heads/a_pr')
        pr2 = repo2.make_pr(head='a_pr', target='master')

    pr1_id = to_pr(env, pr1)
    pr2_id = to_pr(env, pr2)
    batch_id = pr1_id.batch_id
    assert pr2_id.batch_id == batch_id

    assert pr1_id.state == 'opened'
    assert pr2_id.state == 'opened'
    assert batch_id.all_prs == pr1_id | pr2_id
    assert batch_id.prs == pr1_id | pr2_id
    assert batch_id.active
    assert Batches.search_count([]) == 1

    with repo1:
        pr1.close()

    assert pr1_id.state == 'closed'
    assert pr2_id.state == 'opened'
    assert batch_id.all_prs == pr1_id | pr2_id
    assert batch_id.prs == pr2_id
    assert batch_id.active
    assert Batches.search_count([]) == 1

    with repo2:
        pr2.close()

    assert pr1_id.state == 'closed'
    assert pr2_id.state == 'closed'
    assert batch_id.all_prs == pr1_id | pr2_id
    assert batch_id.prs == env['runbot_merge.pull_requests'].browse(())
    assert not batch_id.active
    assert Batches.search_count([]) == 0

@ -59,12 +59,9 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf
    assert staging_id.reason == "Target branch deactivated by 'admin'."

    p = pr_page(page, pr)
    target = dict(zip(
        (e.text for e in p.cssselect('dl.runbot-merge-fields dt')),
        (p.cssselect('dl.runbot-merge-fields dd'))
    ))['target']
    assert target.text_content() == 'other (inactive)'
    assert target.get('class') == 'text-muted bg-warning'
    [target] = p.cssselect('table tr.bg-info')
    assert 'inactive' in target.classes
    assert target[0].text_content() == "other"

    assert pr.comments == [
        (users['reviewer'], "hansen r+"),

@ -5,8 +5,11 @@ source branches).
When preparing a staging, we simply want to ensure branch-matched PRs
are staged concurrently in all repos
"""
import functools
import operator
import time
import xmlrpc.client
from itertools import repeat

import pytest
import requests
@ -21,7 +24,7 @@ def repo_a(project, make_repo, setreviewers):
    r = project.env['runbot_merge.repository'].create({
        'project_id': project.id,
        'name': repo.name,
        'required_statuses': 'legal/cla,ci/runbot',
        'required_statuses': 'default',
        'group_id': False,
    })
    setreviewers(r)
@ -33,7 +36,7 @@ def repo_b(project, make_repo, setreviewers):
    r = project.env['runbot_merge.repository'].create({
        'project_id': project.id,
        'name': repo.name,
        'required_statuses': 'legal/cla,ci/runbot',
        'required_statuses': 'default',
        'group_id': False,
    })
    setreviewers(r)
@ -45,14 +48,14 @@ def repo_c(project, make_repo, setreviewers):
    r = project.env['runbot_merge.repository'].create({
        'project_id': project.id,
        'name': repo.name,
        'required_statuses': 'legal/cla,ci/runbot',
        'required_statuses': 'default',
        'group_id': False,
    })
    setreviewers(r)
    return repo

def make_pr(repo, prefix, trees, *, target='master', user,
            statuses=(('ci/runbot', 'success'), ('legal/cla', 'success')),
            statuses=(('default', 'success'),),
            reviewer):
    """
    :type repo: fake_github.Repo
@ -80,12 +83,6 @@ def make_pr(repo, prefix, trees, *, target='master', user,
        pr.post_comment('hansen r+', reviewer)
    return pr

def make_branch(repo, name, message, tree, protect=True):
    c = repo.make_commit(None, message, None, tree=tree)
    repo.make_ref('heads/%s' % name, c)
    if protect:
        repo.protect(name)
    return c
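
# The removed make_branch helper above is now inlined at its call sites; the
# rough equivalent (a sketch) is:
#     repo.make_commits(None, Commit(message, tree=tree), ref=f'heads/{name}')
# note the inlined form no longer protects the branch.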

@pytest.mark.parametrize('uniquifier', [False, True])
def test_stage_one(env, project, repo_a, repo_b, config, uniquifier):
@ -95,14 +92,14 @@ def test_stage_one(env, project, repo_a, repo_b, config, uniquifier):
    project.batch_limit = 1

    with repo_a:
        make_branch(repo_a, 'master', 'initial', {'a': 'a_0'})
        repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master')
        pr_a = make_pr(
            repo_a, 'A', [{'a': 'a_1'}],
            user=config['role_user']['token'],
            reviewer=config['role_reviewer']['token'])

    with repo_b:
        make_branch(repo_b, 'master', 'initial', {'a': 'b_0'})
        repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref='heads/master')
        pr_b = make_pr(
            repo_b, 'B', [{'a': 'b_1'}],
            user=config['role_user']['token'],
@ -130,14 +127,14 @@ def test_stage_match(env, project, repo_a, repo_b, config, page):
    project.batch_limit = 1

    with repo_a:
        make_branch(repo_a, 'master', 'initial', {'a': 'a_0'})
        repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master')
        prx_a = make_pr(
            repo_a, 'do-a-thing', [{'a': 'a_1'}],
            user=config['role_user']['token'],
            reviewer=config['role_reviewer']['token'],
        )
    with repo_b:
        make_branch(repo_b, 'master', 'initial', {'a': 'b_0'})
        repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref='heads/master')
        prx_b = make_pr(repo_b, 'do-a-thing', [{'a': 'b_1'}],
            user=config['role_user']['token'],
            reviewer=config['role_reviewer']['token'],
@ -163,7 +160,7 @@ def test_stage_match(env, project, repo_a, repo_b, config, page):
    assert get_related_pr_labels(pr_page(page, prx_a)) == [pr_b.display_name]
    assert get_related_pr_labels(pr_page(page, prx_b)) == [pr_a.display_name]
    with repo_a:
        repo_a.post_status('staging.master', 'failure', 'legal/cla')
        repo_a.post_status('staging.master', 'failure')
    env.run_crons()

    assert pr_a.state == 'error'
@ -177,8 +174,7 @@ def test_stage_match(env, project, repo_a, repo_b, config, page):
    assert pr_a.staging_id and pr_b.staging_id
    for repo in [repo_a, repo_b]:
        with repo:
            repo.post_status('staging.master', 'success', 'legal/cla')
            repo.post_status('staging.master', 'success', 'ci/runbot')
            repo.post_status('staging.master', 'success')
    env.run_crons()
    assert pr_a.state == 'merged'
    assert pr_b.state == 'merged'
@ -198,8 +194,8 @@ def test_different_targets(env, project, repo_a, repo_b, config):
        'branch_ids': [(0, 0, {'name': 'other'})]
    })
    with repo_a:
        make_branch(repo_a, 'master', 'initial', {'master': 'a_0'})
        make_branch(repo_a, 'other', 'initial', {'other': 'a_0'})
        repo_a.make_commits(None, Commit('initial', tree={'master': 'a_0'}), ref='heads/master')
        repo_a.make_commits(None, Commit('initial', tree={'other': 'a_0'}), ref='heads/other')
        pr_a = make_pr(
            repo_a, 'do-a-thing', [{'mater': 'a_1'}],
            target='master',
@ -207,8 +203,8 @@ def test_different_targets(env, project, repo_a, repo_b, config):
            reviewer=config['role_reviewer']['token'],
        )
    with repo_b:
        make_branch(repo_b, 'master', 'initial', {'master': 'b_0'})
        make_branch(repo_b, 'other', 'initial', {'other': 'b_0'})
        repo_b.make_commits(None, Commit('initial', tree={'master': 'b_0'}), ref='heads/master')
        repo_b.make_commits(None, Commit('initial', tree={'other': 'b_0'}), ref='heads/other')
        pr_b = make_pr(
            repo_b, 'do-a-thing', [{'other': 'b_1'}],
            target='other',
@ -231,8 +227,7 @@ def test_different_targets(env, project, repo_a, repo_b, config):

    for r in [repo_a, repo_b]:
        with r:
            r.post_status('staging.master', 'success', 'legal/cla')
            r.post_status('staging.master', 'success', 'ci/runbot')
            r.post_status('staging.master', 'success')
    env.run_crons()
    assert pr_a.state == 'merged'

@ -246,7 +241,7 @@ def test_stage_different_statuses(env, project, repo_a, repo_b, config):
    })

    with repo_a:
        make_branch(repo_a, 'master', 'initial', {'a': 'a_0'})
        repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master')
        pr_a = make_pr(
            repo_a, 'do-a-thing', [{'a': 'a_1'}],
            user=config['role_user']['token'],
@ -254,17 +249,16 @@ def test_stage_different_statuses(env, project, repo_a, repo_b, config):
        )
        repo_a.post_status(pr_a.head, 'success', 'foo/bar')
    with repo_b:
        make_branch(repo_b, 'master', 'initial', {'a': 'b_0'})
        repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref='heads/master')
        [c] = repo_b.make_commits(
            'heads/master',
            repo_b.Commit('some_commit\n\nSee also %s#%d' % (repo_a.name, pr_a.number), tree={'a': 'b_1'}),
            repo_b.Commit(f'some_commit\n\nSee also {repo_a.name}#{pr_a.number:d}', tree={'a': 'b_1'}),
            ref='heads/do-a-thing'
        )
        pr_b = repo_b.make_pr(
            title="title", body="body", target='master', head='do-a-thing',
            token=config['role_user']['token'])
        repo_b.post_status(c, 'success', 'ci/runbot')
        repo_b.post_status(c, 'success', 'legal/cla')
        repo_b.post_status(c, 'success')
        pr_b.post_comment('hansen r+', config['role_reviewer']['token'])
    env.run_crons()
    # since the labels are the same but the statuses on pr_b are not the
@ -288,8 +282,7 @@ def test_stage_different_statuses(env, project, repo_a, repo_b, config):
    # do the actual merge to check for the Related header
    for repo in [repo_a, repo_b]:
        with repo:
            repo.post_status('staging.master', 'success', 'legal/cla')
            repo.post_status('staging.master', 'success', 'ci/runbot')
            repo.post_status('staging.master', 'success')
            repo.post_status('staging.master', 'success', 'foo/bar')
    env.run_crons()

@ -318,14 +311,14 @@ def test_unmatch_patch(env, project, repo_a, repo_b, config):
    """
    project.batch_limit = 1
    with repo_a:
        make_branch(repo_a, 'master', 'initial', {'a': 'a_0'})
        repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master')
        pr_a = make_pr(
            repo_a, 'patch-1', [{'a': 'a_1'}],
            user=config['role_user']['token'],
            reviewer=config['role_reviewer']['token'],
        )
    with repo_b:
        make_branch(repo_b, 'master', 'initial', {'a': 'b_0'})
        repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref=f'heads/master')
        pr_b = make_pr(
            repo_b, 'patch-1', [{'a': 'b_1'}],
            user=config['role_user']['token'],
@ -345,16 +338,16 @@ def test_sub_match(env, project, repo_a, repo_b, repo_c, config):
    """
    project.batch_limit = 1
    with repo_a: # no pr here
        make_branch(repo_a, 'master', 'initial', {'a': 'a_0'})
        repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master')
    with repo_b:
        make_branch(repo_b, 'master', 'initial', {'a': 'b_0'})
        repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref='heads/master')
        pr_b = make_pr(
            repo_b, 'do-a-thing', [{'a': 'b_1'}],
            user=config['role_user']['token'],
            reviewer=config['role_reviewer']['token'],
        )
    with repo_c:
        make_branch(repo_c, 'master', 'initial', {'a': 'c_0'})
        repo_c.make_commits(None, Commit('initial', tree={'a': 'c_0'}), ref='heads/master')
        pr_c = make_pr(
            repo_c, 'do-a-thing', [{'a': 'c_1'}],
            user=config['role_user']['token'],
@ -407,8 +400,8 @@ def test_merge_fail(env, project, repo_a, repo_b, users, config):
    project.batch_limit = 1

    with repo_a, repo_b:
        make_branch(repo_a, 'master', 'initial', {'a': 'a_0'})
        make_branch(repo_b, 'master', 'initial', {'a': 'b_0'})
        repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master')
        repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref='heads/master')

        # first set of matched PRs
        pr1a = make_pr(
@ -473,14 +466,14 @@ def test_ff_fail(env, project, repo_a, repo_b, config):
    project.batch_limit = 1

    with repo_a, repo_b:
        root_a = make_branch(repo_a, 'master', 'initial', {'a': 'a_0'})
        [root_a] = repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master')
        make_pr(
            repo_a, 'do-a-thing', [{'a': 'a_1'}],
            user=config['role_user']['token'],
            reviewer=config['role_reviewer']['token'],
        )

        make_branch(repo_b, 'master', 'initial', {'a': 'b_0'})
        repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref=f'heads/master')
        make_pr(
            repo_b, 'do-a-thing', [{'a': 'b_1'}],
            user=config['role_user']['token'],
@ -494,10 +487,8 @@ def test_ff_fail(env, project, repo_a, repo_b, config):
    assert repo_b.commit('heads/master').id == cn

    with repo_a, repo_b:
        repo_a.post_status('heads/staging.master', 'success', 'ci/runbot')
        repo_a.post_status('heads/staging.master', 'success', 'legal/cla')
        repo_b.post_status('heads/staging.master', 'success', 'ci/runbot')
        repo_b.post_status('heads/staging.master', 'success', 'legal/cla')
        repo_a.post_status('heads/staging.master', 'success')
        repo_b.post_status('heads/staging.master', 'success')
    env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
    assert repo_b.commit('heads/master').id == cn,\
        "B should still be at the conflicting commit"
@ -516,7 +507,7 @@ class TestCompanionsNotReady:
        """
        project.batch_limit = 1
        with repo_a, repo_b:
            make_branch(repo_a, 'master', 'initial', {'a': 'a_0'})
            repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master')
            # pr_a is born ready
            p_a = make_pr(
                repo_a, 'do-a-thing', [{'a': 'a_1'}],
@ -524,7 +515,7 @@ class TestCompanionsNotReady:
                reviewer=config['role_reviewer']['token'],
            )

            make_branch(repo_b, 'master', 'initial', {'a': 'b_0'})
            repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref='heads/master')
            p_b = make_pr(
                repo_b, 'do-a-thing', [{'a': 'b_1'}],
                user=config['role_user']['token'],
@ -571,21 +562,21 @@ class TestCompanionsNotReady:
        """
        project.batch_limit = 1
        with repo_a, repo_b, repo_c:
            make_branch(repo_a, 'master', 'initial', {'f': 'a0'})
            repo_a.make_commits(None, Commit('initial', tree={'f': 'a0'}), ref='heads/master')
            pr_a = make_pr(
                repo_a, 'a-thing', [{'f': 'a1'}],
                user=config['role_user']['token'],
                reviewer=None,
            )

            make_branch(repo_b, 'master', 'initial', {'f': 'b0'})
            repo_b.make_commits(None, Commit('initial', tree={'f': 'b0'}), ref='heads/master')
            pr_b = make_pr(
                repo_b, 'a-thing', [{'f': 'b1'}],
                user=config['role_user']['token'],
                reviewer=config['role_reviewer']['token'],
            )

            make_branch(repo_c, 'master', 'initial', {'f': 'c0'})
            repo_c.make_commits(None, Commit('initial', tree={'f': 'c0'}), ref='heads/master')
            pr_c = make_pr(
                repo_c, 'a-thing', [{'f': 'c1'}],
                user=config['role_user']['token'],
@ -611,21 +602,21 @@ class TestCompanionsNotReady:
        """
        project.batch_limit = 1
        with repo_a, repo_b, repo_c:
            make_branch(repo_a, 'master', 'initial', {'f': 'a0'})
            repo_a.make_commits(None, Commit('initial', tree={'f': 'a0'}), ref='heads/master')
            pr_a = make_pr(
                repo_a, 'a-thing', [{'f': 'a1'}],
                user=config['role_user']['token'],
                reviewer=None,
            )

            make_branch(repo_b, 'master', 'initial', {'f': 'b0'})
            repo_b.make_commits(None, Commit('initial', tree={'f': 'b0'}), ref='heads/master')
            pr_b = make_pr(
                repo_b, 'a-thing', [{'f': 'b1'}],
                user=config['role_user']['token'],
                reviewer=config['role_reviewer']['token'],
            )

            make_branch(repo_c, 'master', 'initial', {'f': 'c0'})
            repo_c.make_commits(None, Commit('initial', tree={'f': 'c0'}), ref='heads/master')
            pr_c = make_pr(
                repo_c, 'a-thing', [{'f': 'c1'}],
                user=config['role_user']['token'],
@ -637,19 +628,13 @@ class TestCompanionsNotReady:
        assert pr_b.comments == [
            (users['reviewer'], 'hansen r+'),
            seen(env, pr_b, users),
            (users['user'], "@%s @%s linked pull request(s) %s#%d not ready. Linked PRs are not staged until all of them are ready." % (
                users['user'], users['reviewer'],
                repo_a.name, pr_a.number
            ))
            (users['user'], f"@{users['user']} @{users['reviewer']} linked pull request(s) {repo_a.name}#{pr_a.number} not ready. Linked PRs are not staged until all of them are ready.")
        ]
        assert pr_c.comments == [
            (users['reviewer'], 'hansen r+'),
            seen(env, pr_c, users),
            (users['user'],
             "@%s @%s linked pull request(s) %s#%d not ready. Linked PRs are not staged until all of them are ready." % (
                users['user'], users['reviewer'],
                repo_a.name, pr_a.number
            ))
             f"@{users['user']} @{users['reviewer']} linked pull request(s) {repo_a.name}#{pr_a.number} not ready. Linked PRs are not staged until all of them are ready.")
        ]

def test_other_failed(env, project, repo_a, repo_b, users, config):
@ -658,7 +643,7 @@ def test_other_failed(env, project, repo_a, repo_b, users, config):
    message
    """
    with repo_a, repo_b:
        make_branch(repo_a, 'master', 'initial', {'a': 'a_0'})
        repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master')
        # pr_a is born ready
        pr_a = make_pr(
            repo_a, 'do-a-thing', [{'a': 'a_1'}],
@ -666,17 +651,15 @@ def test_other_failed(env, project, repo_a, repo_b, users, config):
            reviewer=config['role_reviewer']['token'],
        )

        make_branch(repo_b, 'master', 'initial', {'a': 'b_0'})
        repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref='heads/master')
    env.run_crons()

    pr = to_pr(env, pr_a)
    assert pr.staging_id

    with repo_a, repo_b:
        repo_a.post_status('heads/staging.master', 'success', 'legal/cla')
        repo_a.post_status('heads/staging.master', 'success', 'ci/runbot', target_url="http://example.org/a")
        repo_b.post_status('heads/staging.master', 'success', 'legal/cla')
        repo_b.post_status('heads/staging.master', 'failure', 'ci/runbot', target_url="http://example.org/b")
        repo_a.post_status('heads/staging.master', 'success', target_url="http://example.org/a")
        repo_b.post_status('heads/staging.master', 'failure', target_url="http://example.org/b")
    env.run_crons()

    sth = repo_b.commit('heads/staging.master').id
@ -685,7 +668,7 @@ def test_other_failed(env, project, repo_a, repo_b, users, config):
    assert pr_a.comments == [
        (users['reviewer'], 'hansen r+'),
        seen(env, pr_a, users),
        (users['user'], '@%s @%s staging failed: ci/runbot on %s (view more at http://example.org/b)' % (
        (users['user'], '@%s @%s staging failed: default on %s (view more at http://example.org/b)' % (
            users['user'], users['reviewer'],
            sth
        ))
@ -699,8 +682,8 @@ class TestMultiBatches:
        project.batch_limit = 3

        with repo_a, repo_b:
            make_branch(repo_a, 'master', 'initial', {'a': 'a0'})
            make_branch(repo_b, 'master', 'initial', {'b': 'b0'})
            repo_a.make_commits(None, Commit('initial', tree={'a': 'a0'}), ref='heads/master')
            repo_b.make_commits(None, Commit('initial', tree={'b': 'b0'}), ref='heads/master')

        prs = [(
            a and make_pr(repo_a, 'batch{}'.format(i), [{'a{}'.format(i): 'a{}'.format(i)}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token']),
@ -732,8 +715,8 @@ class TestMultiBatches:
        """ If a staging fails, it should get split properly across repos
        """
        with repo_a, repo_b:
            make_branch(repo_a, 'master', 'initial', {'a': 'a0'})
            make_branch(repo_b, 'master', 'initial', {'b': 'b0'})
            repo_a.make_commits(None, Commit('initial', tree={'a': 'a0'}), ref='heads/master')
            repo_b.make_commits(None, Commit('initial', tree={'b': 'b0'}), ref='heads/master')

        prs = [(
            a and make_pr(repo_a, 'batch{}'.format(i), [{'a{}'.format(i): 'a{}'.format(i)}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token']),
@ -754,8 +737,7 @@ class TestMultiBatches:
        # mark b.staging as failed -> should create two splits with (0, 1)
        # and (2, 3, 4) and stage the first one
        with repo_b:
            repo_b.post_status('heads/staging.master', 'success', 'legal/cla')
            repo_b.post_status('heads/staging.master', 'failure', 'ci/runbot')
            repo_b.post_status('heads/staging.master', 'failure')
        env.run_crons()

        assert not st0.active
@ -775,32 +757,44 @@ class TestMultiBatches:
            prs[2][0] | prs[2][1] | prs[3][0] | prs[3][1] | prs[4][0]

def test_urgent(env, repo_a, repo_b, config):
    """ Either PR of a co-dependent pair being p=0 leads to the entire pair
    being prioritized
    """ Either PR of a co-dependent pair being prioritised leads to the entire
    pair being prioritised
    """
    with repo_a, repo_b:
        make_branch(repo_a, 'master', 'initial', {'a0': 'a'})
        make_branch(repo_b, 'master', 'initial', {'b0': 'b'})
        repo_a.make_commits(None, Commit('initial', tree={'a0': 'a'}), ref='heads/master')
        repo_b.make_commits(None, Commit('initial', tree={'b0': 'b'}), ref='heads/master')

        pr_a = make_pr(repo_a, 'batch', [{'a1': 'a'}, {'a2': 'a'}], user=config['role_user']['token'], reviewer=None, statuses=[])
        pr_b = make_pr(repo_b, 'batch', [{'b1': 'b'}, {'b2': 'b'}], user=config['role_user']['token'], reviewer=None, statuses=[])
        pr_c = make_pr(repo_a, 'C', [{'c1': 'c', 'c2': 'c'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'],)
        pr_c = make_pr(repo_a, 'C', [{'c1': 'c', 'c2': 'c'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'])

        pr_a.post_comment('hansen rebase-merge', config['role_reviewer']['token'])
        pr_b.post_comment('hansen rebase-merge p=0', config['role_reviewer']['token'])
        pr_b.post_comment('hansen rebase-merge alone skipchecks', config['role_reviewer']['token'])
    env.run_crons()
    # should have batched pr_a and pr_b despite neither being reviewed or
    # approved
    p_a, p_b = to_pr(env, pr_a), to_pr(env, pr_b)
    p_c = to_pr(env, pr_c)

    p_a, p_b, p_c = to_pr(env, pr_a), to_pr(env, pr_b), to_pr(env, pr_c)
    assert not p_a.blocked
    assert not p_b.blocked

    assert p_a.staging_id and p_b.staging_id and p_a.staging_id == p_b.staging_id,\
        "a and b should be staged despite neither being reviewed nor approved"
    assert p_a.batch_id and p_b.batch_id and p_a.batch_id == p_b.batch_id,\
        "a and b should have been recognised as co-dependent"
    assert not p_c.staging_id

with repo_a:
|
||||
pr_a.post_comment('hansen r-', config['role_reviewer']['token'])
|
||||
env.run_crons()
|
||||
assert not p_b.staging_id.active, "should be unstaged"
|
||||
assert p_b.priority == 'alone', "priority should not be affected anymore"
|
||||
assert not p_b.skipchecks, "r- of linked pr should have un-skipcheck-ed this one"
|
||||
assert p_a.blocked
|
||||
assert p_b.blocked
|
||||
|
||||
class TestBlocked:
|
||||
def test_merge_method(self, env, repo_a, config):
|
||||
with repo_a:
|
||||
make_branch(repo_a, 'master', 'initial', {'a0': 'a'})
|
||||
repo_a.make_commits(None, Commit('initial', tree={'a0': 'a'}), ref='heads/master')
|
||||
|
||||
pr = make_pr(repo_a, 'A', [{'a1': 'a'}, {'a2': 'a'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'],)
|
||||
env.run_crons()
|
||||
@ -814,33 +808,55 @@ class TestBlocked:
|
||||
|
||||
def test_linked_closed(self, env, repo_a, repo_b, config):
|
||||
with repo_a, repo_b:
|
||||
make_branch(repo_a, 'master', 'initial', {'a0': 'a'})
|
||||
make_branch(repo_b, 'master', 'initial', {'b0': 'b'})
|
||||
repo_a.make_commits(None, Commit('initial', tree={'a0': 'a'}), ref='heads/master')
|
||||
repo_b.make_commits(None, Commit('initial', tree={'b0': 'b'}), ref='heads/master')
|
||||
|
||||
pr = make_pr(repo_a, 'xxx', [{'a1': 'a'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'],)
|
||||
b = make_pr(repo_b, 'xxx', [{'b1': 'b'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'], statuses=[])
|
||||
pr1_a = make_pr(repo_a, 'xxx', [{'a1': 'a'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'],)
|
||||
pr1_b = make_pr(repo_b, 'xxx', [{'b1': 'b'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'], statuses=[])
|
||||
env.run_crons()
|
||||
|
||||
p = to_pr(env, pr)
|
||||
assert p.blocked
|
||||
with repo_b: b.close()
|
||||
# FIXME: find a way for PR.blocked to depend on linked PR somehow so this isn't needed
|
||||
p.invalidate_cache(['blocked'], [p.id])
|
||||
assert not p.blocked
|
||||
head_a = repo_a.commit('master').id
|
||||
head_b = repo_b.commit('master').id
|
||||
pr1_a_id = to_pr(env, pr1_a)
|
||||
pr1_b_id = to_pr(env, pr1_b)
|
||||
assert pr1_a_id.blocked
|
||||
with repo_b: pr1_b.close()
|
||||
assert not pr1_a_id.blocked
|
||||
assert len(pr1_a_id.batch_id.all_prs) == 2
|
||||
assert pr1_a_id.state == 'ready'
|
||||
assert pr1_b_id.state == 'closed'
|
||||
env.run_crons()
|
||||
assert pr1_a_id.staging_id
|
||||
with repo_a, repo_b:
|
||||
repo_a.post_status('staging.master', 'success')
|
||||
repo_b.post_status('staging.master', 'success')
|
||||
env.run_crons()
|
||||
assert pr1_a_id.state == 'merged'
|
||||
assert pr1_a_id.batch_id.merge_date
|
||||
assert repo_a.commit('master').id != head_a, \
|
||||
"the master of repo A should be updated"
|
||||
assert repo_b.commit('master').id == head_b, \
|
||||
"the master of repo B should not be updated"
|
||||
|
||||
with repo_a:
|
||||
pr2_a = make_pr(repo_a, "xxx", [{'x': 'x'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'])
|
||||
env.run_crons()
|
||||
pr2_a_id = to_pr(env, pr2_a)
|
||||
assert pr2_a_id.batch_id != pr1_a_id.batch_id
|
||||
assert pr2_a_id.label == pr1_a_id.label
|
||||
assert len(pr2_a_id.batch_id.all_prs) == 1
|
||||
|
||||
    def test_linked_merged(self, env, repo_a, repo_b, config):
        with repo_a, repo_b:
            make_branch(repo_a, 'master', 'initial', {'a0': 'a'})
            make_branch(repo_b, 'master', 'initial', {'b0': 'b'})
            repo_a.make_commits(None, Commit('initial', tree={'a0': 'a'}), ref='heads/master')
            repo_b.make_commits(None, Commit('initial', tree={'b0': 'b'}), ref='heads/master')

            b = make_pr(repo_b, 'xxx', [{'b1': 'b'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'])
        env.run_crons() # stage b and c

        with repo_a, repo_b:
            repo_a.post_status('heads/staging.master', 'success', 'legal/cla')
            repo_a.post_status('heads/staging.master', 'success', 'ci/runbot')
            repo_b.post_status('heads/staging.master', 'success', 'legal/cla')
            repo_b.post_status('heads/staging.master', 'success', 'ci/runbot')
            repo_a.post_status('heads/staging.master', 'success')
            repo_b.post_status('heads/staging.master', 'success')
        env.run_crons() # merge b and c
        assert to_pr(env, b).state == 'merged'
@ -854,12 +870,12 @@ class TestBlocked:
    def test_linked_unready(self, env, repo_a, repo_b, config):
        """ Create a PR A linked to a non-ready PR B:
        * A is blocked by default
        * A is not blocked if A.p=0
        * A is not blocked if B.p=0
        * A is not blocked if A.skipci
        * A is not blocked if B.skipci
        """
        with repo_a, repo_b:
            make_branch(repo_a, 'master', 'initial', {'a0': 'a'})
            make_branch(repo_b, 'master', 'initial', {'b0': 'b'})
            repo_a.make_commits(None, Commit('initial', tree={'a0': 'a'}), ref='heads/master')
            repo_b.make_commits(None, Commit('initial', tree={'b0': 'b'}), ref='heads/master')

            a = make_pr(repo_a, 'xxx', [{'a1': 'a'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'])
            b = make_pr(repo_b, 'xxx', [{'b1': 'b'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'], statuses=[])
@ -868,13 +884,11 @@ class TestBlocked:
        pr_a = to_pr(env, a)
        assert pr_a.blocked

        with repo_a: a.post_comment('hansen p=0', config['role_reviewer']['token'])
        with repo_a: a.post_comment('hansen skipchecks', config['role_reviewer']['token'])
        assert not pr_a.blocked
        pr_a.skipchecks = False

        with repo_a: a.post_comment('hansen p=2', config['role_reviewer']['token'])
        assert pr_a.blocked

        with repo_b: b.post_comment('hansen p=0', config['role_reviewer']['token'])
        with repo_b: b.post_comment('hansen skipchecks', config['role_reviewer']['token'])
        assert not pr_a.blocked
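Context for the command changes above: the old `p=0`/`p=2` priority levels overloaded several meanings, and the v2 command set splits them into orthogonal flags, which is why the test now toggles `skipchecks` instead. A toy sketch of that orthogonality, with a hypothetical parser and flag spellings rather than the mergebot's actual grammar:

    def parse_flags(tokens):
        """Each token sets exactly one flag; no flag implies another."""
        flags = {}
        for tok in tokens:
            if tok == 'skipchecks':           # replaces the old p=0 CI/review bypass
                flags['skipchecks'] = True
            elif tok == 'cancel=staging':     # cancel the current staging when unblocked
                flags['cancel_staging'] = True
            elif tok.startswith('priority='):  # hypothetical spelling of a pure priority
                flags['priority'] = tok.split('=', 1)[1]
        return flags

    assert parse_flags(['skipchecks']) == {'skipchecks': True}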
def test_different_branches(env, project, repo_a, repo_b, config):
@ -885,9 +899,9 @@ def test_different_branches(env, project, repo_a, repo_b, config):
    env['runbot_merge.repository'].search([('name', '=', repo_b.name)])\
        .branch_filter = '[("name", "=", "master")]'
    with repo_a, repo_b:
        make_branch(repo_a, 'dev', 'initial', {'a': '0'})
        make_branch(repo_a, 'master', 'initial', {'b': '0'})
        make_branch(repo_b, 'master', 'initial', {'b': '0'})
        repo_a.make_commits(None, Commit('initial', tree={'a': '0'}), ref='heads/dev')
        repo_a.make_commits(None, Commit('initial', tree={'b': '0'}), ref='heads/master')
        repo_b.make_commits(None, Commit('initial', tree={'b': '0'}), ref='heads/master')

        pr_a = make_pr(
            repo_a, 'xxx', [{'a': '1'}],
@ -899,8 +913,7 @@ def test_different_branches(env, project, repo_a, repo_b, config):

    with repo_a:
        pr_a.post_comment('hansen r+', config['role_reviewer']['token'])
        repo_a.post_status('heads/staging.dev', 'success', 'legal/cla')
        repo_a.post_status('heads/staging.dev', 'success', 'ci/runbot')
        repo_a.post_status('heads/staging.dev', 'success')
    env.run_crons()

    assert to_pr(env, pr_a).state == 'merged'
@ -979,9 +992,9 @@ class TestSubstitutions:
        repo_b_id.substitutions = r"/.+:/%s:/" % repo_a.owner

        with repo_a:
            make_branch(repo_a, 'master', 'initial', {'a': '0'})
            repo_a.make_commits(None, Commit('initial', tree={'a': '0'}), ref='heads/master')
        with repo_b:
            make_branch(repo_b, 'master', 'initial', {'b': '0'})
            repo_b.make_commits(None, Commit('initial', tree={'b': '0'}), ref='heads/master')

        # policy is that repo_a PRs are created in the same repo while repo_b PRs
        # are created in personal forks
@ -996,24 +1009,16 @@ class TestSubstitutions:
            target='master', head='%s:abranch' % b_fork.owner
        )

        pra_id = env['runbot_merge.pull_requests'].search([
            ('repository.name', '=', repo_a.name),
            ('number', '=', pra.number)
        ])
        prb_id = env['runbot_merge.pull_requests'].search([
            ('repository.name', '=', repo_b.name),
            ('number', '=', prb.number)
        ])
        pra_id = to_pr(env, pra)
        prb_id = to_pr(env, prb)
        assert pra_id.label.endswith(':abranch')
        assert prb_id.label.endswith(':abranch')

        with repo_a, repo_b:
            repo_a.post_status(pra.head, 'success', 'legal/cla')
            repo_a.post_status(pra.head, 'success', 'ci/runbot')
            repo_a.post_status(pra.head, 'success')
            pra.post_comment('hansen r+', config['role_reviewer']['token'])

            repo_b.post_status(prb.head, 'success', 'legal/cla')
            repo_b.post_status(prb.head, 'success', 'ci/runbot')
            repo_b.post_status(prb.head, 'success')
            prb.post_comment('hansen r+', config['role_reviewer']['token'])
        env.run_crons()
@ -1104,11 +1109,9 @@ def test_multi_project(env, make_repo, setreviewers, users, config,

    assert pr1.comments == [
        (users['reviewer'], 'hansen r+'),
        (users['user'], f'[Pull request status dashboard]({pr1_id.url}).'),
        seen(env, pr1, users),
    ]
    assert pr2.comments == [
        (users['user'], f'[Pull request status dashboard]({pr2_id.url}).'),
    ]
    assert pr2.comments == [seen(env, pr2, users)]
def test_freeze_complete(env, project, repo_a, repo_b, repo_c, users, config):
    """ Tests the freeze wizard feature (aside from the UI):
@ -1126,6 +1129,8 @@ def test_freeze_complete(env, project, repo_a, repo_b, repo_c, users, config):
    * check that freeze goes through
    * check that reminder is shown
    * check that new branches are created w/ correct parent & commit info
    * check that the PRs (freeze and bump) are part of synthetic stagings so
      they're correctly accounted for in the change history
    """
    project.freeze_reminder = "Don't forget to like and subscribe"

@ -1181,18 +1186,15 @@ def test_freeze_complete(env, project, repo_a, repo_b, repo_c, users, config):

    with repo_a:
        pr_required_a.post_comment('hansen r+', config['role_reviewer']['token'])
        repo_a.post_status(pr_required_a.head, 'success', 'ci/runbot')
        repo_a.post_status(pr_required_a.head, 'success', 'legal/cla')
        repo_a.post_status(pr_required_a.head, 'success')
    with repo_c:
        pr_required_c.post_comment('hansen r+', config['role_reviewer']['token'])
        repo_c.post_status(pr_required_c.head, 'success', 'ci/runbot')
        repo_c.post_status(pr_required_c.head, 'success', 'legal/cla')
        repo_c.post_status(pr_required_c.head, 'success')
    env.run_crons()

    for repo in [repo_a, repo_b, repo_c]:
        with repo:
            repo.post_status('staging.master', 'success', 'ci/runbot')
            repo.post_status('staging.master', 'success', 'legal/cla')
            repo.post_status('staging.master', 'success')
    env.run_crons()

    assert to_pr(env, pr_required_a).state == 'merged'
@ -1215,22 +1217,35 @@ def test_freeze_complete(env, project, repo_a, repo_b, repo_c, users, config):
    assert r['res_model'] == 'runbot_merge.project'
    assert r['res_id'] == project.id

    release_pr_ids = functools.reduce(operator.add, release_prs.values())
    # stuff that's done directly
    for pr_id in release_prs.values():
        assert pr_id.state == 'merged'
    assert all(pr_id.state == 'merged' for pr_id in release_pr_ids)
    assert pr_bump_id.state == 'merged'
    assert pr_bump_id.commits_map != '{}'

    assert len(release_pr_ids.batch_id) == 1
    assert release_pr_ids.batch_id.merge_date
    assert release_pr_ids.batch_id.staging_ids.target.name == '1.1'
    assert release_pr_ids.batch_id.staging_ids.state == 'success'

    assert pr_bump_id.batch_id.merge_date
    assert pr_bump_id.batch_id.staging_ids.target.name == 'master'
    assert pr_bump_id.batch_id.staging_ids.state == 'success'

    # stuff that's behind a cron
    env.run_crons()

    # check again to be sure
    assert all(pr_id.state == 'merged' for pr_id in release_pr_ids)
    assert pr_bump_id.state == 'merged'

    assert pr_rel_a.state == "closed"
    assert pr_rel_a.base['ref'] == '1.1'
    assert pr_rel_b.state == "closed"
    assert pr_rel_b.base['ref'] == '1.1'
    assert pr_rel_c.state == "closed"
    assert pr_rel_c.base['ref'] == '1.1'
    for pr_id in release_prs.values():
        assert pr_id.target.name == '1.1'
    assert all(pr_id.target.name == '1.1' for pr_id in release_pr_ids)

    assert pr_bump_a.state == 'closed'
    assert pr_bump_a.base['ref'] == 'master'
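An aside on the fold above: `functools.reduce(operator.add, release_prs.values())` turns the per-repo mapping of release PRs into a single recordset, so the batch assertions can run over all of them at once. The same fold, with plain lists standing in for recordsets:

    import functools
    import operator

    # lists stand in for Odoo recordsets; operator.add concatenates either
    release_prs = {'repo_a': ['PR a#1'], 'repo_b': ['PR b#1'], 'repo_c': ['PR c#1']}
    release_pr_ids = functools.reduce(operator.add, release_prs.values())
    assert release_pr_ids == ['PR a#1', 'PR b#1', 'PR c#1']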
@ -1453,3 +1468,63 @@ def test_freeze_conflict(env, project, repo_a, repo_b, repo_c, users, config):
    with pytest.raises(AssertionError) as e:
        repo_b.get_ref('heads/1.1')
    assert e.value.args[0].startswith("Not Found")

def test_cancel_staging(env, project, repo_a, repo_b, users, config):
    """If a batch is flagged as staging-cancelling (from any PR), the staging
    should get cancelled if and when the batch transitions to unblocked.
    """
    with repo_a, repo_b:
        repo_a.make_commits(None, Commit('initial', tree={'a': '1'}), ref='heads/master')
        repo_b.make_commits(None, Commit('initial', tree={'b': '1'}), ref='heads/master')

        pr_a = make_pr(repo_a, 'batch', [{'a': '2'}], user=config['role_user']['token'], statuses=[], reviewer=None)
        pr_b = make_pr(repo_b, 'batch', [{'b': '2'}], user=config['role_user']['token'], statuses=[], reviewer=None)
        pr_lone = make_pr(
            repo_a,
            "C",
            [{'c': '1'}],
            user=config['role_user']['token'],
            reviewer=config['role_reviewer']['token'],
        )
    env.run_crons()

    a_id, b_id, lone_id = map(to_pr, repeat(env), [pr_a, pr_b, pr_lone])
    assert lone_id.staging_id
    st = lone_id.staging_id

    with repo_a:
        pr_a.post_comment("hansen cancel=staging", config['role_reviewer']['token'])
    assert a_id.state == 'opened'
    assert a_id.cancel_staging
    assert b_id.cancel_staging
    assert lone_id.staging_id == st
    with repo_a:
        pr_a.post_comment('hansen r+', config['role_reviewer']['token'])
    assert a_id.state == 'approved'
    assert lone_id.staging_id == st
    with repo_a:
        repo_a.post_status(a_id.head, 'success')
    env.run_crons()
    assert a_id.state == 'ready'
    assert lone_id.staging_id == st

    assert b_id.state == 'opened'
    with repo_b:
        pr_b.post_comment('hansen r+', config['role_reviewer']['token'])
    assert b_id.state == 'approved'
    assert lone_id.staging_id == st
    with repo_b:
        repo_b.post_status(b_id.head, 'success')
    assert b_id.state == 'approved'
    assert lone_id.staging_id == st
    env.run_crons()
    assert b_id.state == 'ready'
    # should have cancelled the staging, picked a and b, and re-staged the
    # entire thing
    assert lone_id.staging_id != st

    assert len({
        lone_id.staging_id.id,
        a_id.staging_id.id,
        b_id.staging_id.id,
    }) == 1
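A sketch of the gating the docstring above describes, using illustrative names rather than the actual runbot_merge internals: the cancel_staging flag is sticky on the batch, but it only takes effect at the moment the batch becomes ready.

    from dataclasses import dataclass

    @dataclass
    class Staging:
        active: bool = True
        reason: str = ''

        def cancel(self, reason: str) -> None:
            self.active = False
            self.reason = reason

    @dataclass
    class Batch:
        ready: bool = False          # all linked PRs approved and CI green
        cancel_staging: bool = False

    def on_batch_update(batch: Batch, staging: Staging) -> None:
        # a lone "hansen cancel=staging" on an unready PR does nothing yet;
        # the cancellation fires only on the transition to unblocked
        if batch.cancel_staging and batch.ready and staging.active:
            staging.cancel('unstaged by batch becoming ready')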
@ -252,3 +252,27 @@ def test_merge_emptying_commits(env, project, make_repo, setreviewers, users, co
    assert pr3.comments[3:] == [
        (users['user'], f"{ping} unable to stage: results in an empty tree when merged, might be the duplicate of a merged PR.")
    ]

def test_force_ready(env, make_repo, project, setreviewers, config):
    repo = make_repo('repo')
    project.write({'repo_ids': [(0, 0, {
        'name': repo.name,
        'group_id': False,
        'required_statuses': 'default',
    })]})
    setreviewers(*project.repo_ids)

    with repo:
        [m] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), ref="heads/master")

        [c] = repo.make_commits(m, Commit('first', tree={'m': 'c1'}), ref="heads/other")
        pr = repo.make_pr(title='title', body='body', target='master', head=c)
    env.run_crons()

    pr_id = to_pr(env, pr)
    pr_id.state = 'ready'

    assert pr_id.state == 'ready'
    assert pr_id.status == 'pending'
    reviewer = env['res.users'].browse([env._uid]).partner_id
    assert pr_id.reviewed_by == reviewer
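What test_force_ready exercises: in the v2 model `state` is a writable compute, and writing 'ready' back-fills the fields that would normally imply it. A hedged sketch with illustrative names, not the real runbot_merge field definitions:

    from odoo import api, fields, models

    class PullRequest(models.Model):
        _name = 'sketch.pr'

        reviewed_by = fields.Many2one('res.partner')
        state = fields.Selection(
            [('opened', 'Opened'), ('ready', 'Ready')],
            compute='_compute_state', inverse='_inverse_state', store=True)

        @api.depends('reviewed_by')
        def _compute_state(self):
            for pr in self:
                pr.state = 'ready' if pr.reviewed_by else 'opened'

        def _inverse_state(self):
            # forcing state = 'ready' records the writer as reviewer, which
            # is what the test asserts via pr_id.reviewed_by == reviewer
            for pr in self.filtered(lambda p: p.state == 'ready'):
                pr.reviewed_by = pr.env.user.partner_id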
runbot_merge/tests/test_project_toggles.py (new file, 129 lines)
@ -0,0 +1,129 @@
import functools
from itertools import repeat

import pytest

from utils import Commit, to_pr, ensure_one


@pytest.fixture
def repo(env, project, make_repo, users, setreviewers):
    r = make_repo('repo')
    project.write({'repo_ids': [(0, 0, {
        'name': r.name,
        'group_id': False,
        'required_statuses': 'default',
    })]})
    setreviewers(*project.repo_ids)
    return r

def test_disable_staging(env, project, repo, config):
    """In order to avoid issues of cron locking, as well as to avoid disabling
    staging for every project when trying to freeze just one of them (cough
    cough), a toggle is available on the project to skip staging for it.
    """
    with repo:
        [m] = repo.make_commits(None, Commit("m", tree={"a": "1"}), ref="heads/master")

        [c] = repo.make_commits(m, Commit("c", tree={"a": "2"}), ref="heads/other")
        pr = repo.make_pr(title="whatever", target="master", head="other")
        pr.post_comment("hansen r+", config["role_reviewer"]['token'])
        repo.post_status(c, "success")
    env.run_crons()

    pr_id = to_pr(env, pr)
    staging_1 = pr_id.staging_id
    assert staging_1.active

    project.staging_enabled = False
    staging_1.cancel("because")

    env.run_crons()

    assert staging_1.active is False
    assert staging_1.state == "cancelled"
    assert not pr_id.staging_id.active, \
        "should not be re-staged, because staging has been disabled"
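A rough sketch of where such a toggle would short-circuit, with hypothetical cron and helper names rather than the actual runbot_merge staging cron:

    def create_stagings(env):
        for project in env['runbot_merge.project'].search([]):
            if not project.staging_enabled:
                continue  # staging disabled project-wide, e.g. during a freeze
            for branch in project.branch_ids.filtered('active'):
                try_staging(branch)  # hypothetical staging helper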
@pytest.mark.parametrize('mode,cutoff,second', [
    # default mode, the second staging is the first half of the first staging
    ('default', 2, [0]),
    # splits are right-biased (the midpoint is rounded down), so for odd
    # staging sizes the first split is the smaller one
    ('default', 3, [0]),
    # if the split results in ((1, 2), 1), largest stages the second
    ('largest', 3, [1, 2]),
    # if the split results in ((1, 1), 2), largest stages the ready PRs
    ('largest', 2, [2, 3]),
    # even if it's a small minority, ready selects the ready PR(s)
    ('ready', 3, [3]),
    ('ready', 2, [2, 3]),
])
def test_staging_priority(env, project, repo, config, mode, cutoff, second):
    """By default, unless a PR is prioritised as "alone", splits take priority
    over new stagings.

    *However*, to try and maximise throughput in trying times, it's possible
    to configure the project to prioritise either the largest staging (between
    split and ready batches), or to just prioritise new stagings.
    """
    def select(prs, indices):
        zero = env['runbot_merge.pull_requests']
        filtered = (p for i, p in enumerate(prs) if i in indices)
        return functools.reduce(lambda a, b: a | b, filtered, zero)

    project.staging_priority = mode
    # we need at least 3 PRs, two that we can split out, and one leftover
    with repo:
        [m] = repo.make_commits(None, Commit("m", tree={"ble": "1"}), ref="heads/master")

        [c] = repo.make_commits(m, Commit("c", tree={"1": "1"}), ref="heads/pr1")
        pr1 = repo.make_pr(title="whatever", target="master", head="pr1")

        [c] = repo.make_commits(m, Commit("c", tree={"2": "2"}), ref="heads/pr2")
        pr2 = repo.make_pr(title="whatever", target="master", head="pr2")

        [c] = repo.make_commits(m, Commit("c", tree={"3": "3"}), ref="heads/pr3")
        pr3 = repo.make_pr(title="whatever", target="master", head="pr3")

        [c] = repo.make_commits(m, Commit("c", tree={"4": "4"}), ref="heads/pr4")
        pr4 = repo.make_pr(title="whatever", target="master", head="pr4")

    prs = [pr1, pr2, pr3, pr4]
    pr_ids = functools.reduce(
        lambda a, b: a | b,
        map(to_pr, repeat(env), prs)
    )
    # ready the PRs for the initial staging (to split)
    pre_cutoff = pr_ids[:cutoff]
    with repo:
        for pr, pr_id in zip(prs[:cutoff], pre_cutoff):
            pr.post_comment('hansen r+', config['role_reviewer']['token'])
            repo.post_status(pr_id.head, 'success')
    env.run_crons()
    # check they staged as expected
    assert all(p.staging_id for p in pre_cutoff)
    staging = ensure_one(env['runbot_merge.stagings'].search([]))
    ensure_one(pre_cutoff.staging_id)

    # ready the rest
    with repo:
        for pr, pr_id in zip(prs[cutoff:], pr_ids[cutoff:]):
            pr.post_comment('hansen r+', config['role_reviewer']['token'])
            repo.post_status(pr_id.head, 'success')
    env.run_crons('runbot_merge.process_updated_commits')
    assert not pr_ids.filtered(lambda p: p.blocked)

    # trigger a split
    with repo:
        repo.post_status('staging.master', 'failure')
    env.run_crons('runbot_merge.process_updated_commits', 'runbot_merge.merge_cron')
    assert not staging.active
    assert not env['runbot_merge.stagings'].search([]).active
    assert env['runbot_merge.split'].search_count([]) == 2

    env.run_crons()

    # check that st.pr_ids are the PRs we expect
    st = env['runbot_merge.stagings'].search([])
    assert st.pr_ids == select(pr_ids, second)
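The right-biased split in isolation, a standalone restatement of the comment in the parametrization above rather than the actual runbot_merge split code: the midpoint is rounded down, so for an odd-sized staging the first half is the smaller one.

    def split(batches):
        h = len(batches) // 2
        return batches[:h], batches[h:]

    assert split([1, 2]) == ([1], [2])
    assert split([1, 2, 3]) == ([1], [2, 3])  # the first split is the smaller one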
@ -9,7 +9,7 @@ def repo(env, project, make_repo, users, setreviewers):
    project.write({'repo_ids': [(0, 0, {
        'name': r.name,
        'group_id': False,
        'required_statuses': 'ci'
        'required_statuses': 'default'
    })]})
    setreviewers(*project.repo_ids)
    return r
@ -26,13 +26,13 @@ def test_staging_disabled_branch(env, project, repo, config):
    [c1] = repo.make_commits(master_commit, Commit("thing", tree={'a': '2'}), ref='heads/master-thing')
    master_pr = repo.make_pr(title="whatever", target="master", head="master-thing")
    master_pr.post_comment("hansen r+", config['role_reviewer']['token'])
    repo.post_status(c1, 'success', 'ci')
    repo.post_status(c1, 'success')

    [other_commit] = repo.make_commits(None, Commit("other", tree={'b': '1'}), ref='heads/other')
    [c2] = repo.make_commits(other_commit, Commit("thing", tree={'b': '2'}), ref='heads/other-thing')
    other_pr = repo.make_pr(title="whatever", target="other", head="other-thing")
    other_pr.post_comment("hansen r+", config['role_reviewer']['token'])
    repo.post_status(c2, 'success', 'ci')
    repo.post_status(c2, 'success')
    env.run_crons()

    assert to_pr(env, master_pr).staging_id, \
@ -89,7 +89,7 @@ def test_basic(env, project, make_repo, users, setreviewers, config):
        (users['reviewer'], 'hansen r+'),
        seen(env, pr, users),
        (users['reviewer'], 'hansen override=l/int'),
        (users['user'], "I'm sorry, @{}: you are not allowed to override this status.".format(users['reviewer'])),
        (users['user'], "@{} you are not allowed to override 'l/int'.".format(users['reviewer'])),
        (users['other'], "hansen override=l/int"),
    ]
    assert pr_id.statuses == '{}'
runbot_merge/views/batch.xml (new file, 62 lines)
@ -0,0 +1,62 @@
<odoo>
    <record id="runbot_merge_batch_form" model="ir.ui.view">
        <field name="name">Batch form</field>
        <field name="model">runbot_merge.batch</field>
        <field name="arch" type="xml">
            <form>
                <sheet>
                    <div class="oe_title"><h1><field name="name"/></h1></div>
                    <group>
                        <group>
                            <field name="target"/>
                            <field name="merge_date"/>
                            <field name="priority" attrs="{'invisible': [('merge_date', '!=', False)]}"/>
                            <field name="skipchecks" widget="boolean_toggle" attrs="{'invisible': [('merge_date', '!=', False)]}"/>
                            <field name="cancel_staging" widget="boolean_toggle" attrs="{'invisible': [('merge_date', '!=', False)]}"/>
                            <field name="fw_policy"/>
                        </group>
                        <group>
                            <field name="blocked"/>
                        </group>
                    </group>
                    <group string="Pull Requests">
                        <group colspan="4">
                            <field name="all_prs" nolabel="1" readonly="1">
                                <tree>
                                    <button type="object" name="get_formview_action" icon="fa-external-link"/>
                                    <field name="display_name"/>
                                    <field name="repository"/>
                                    <field name="state"/>
                                </tree>
                            </field>
                        </group>
                    </group>
                    <group string="Genealogy">
                        <group colspan="4">
                            <field name="genealogy_ids" nolabel="1" readonly="1">
                                <tree decoration-muted="id == parent.id">
                                    <button type="object" name="get_formview_action" icon="fa-external-link"/>
                                    <field name="name"/>
                                    <field name="target"/>
                                    <field name="all_prs" widget="many2many_tags"/>
                                </tree>
                            </field>
                        </group>
                    </group>
                    <group string="Stagings">
                        <group colspan="4">
                            <field name="staging_ids" nolabel="1" readonly="1">
                                <tree>
                                    <button type="object" name="get_formview_action" icon="fa-external-link"/>
                                    <field name="staged_at"/>
                                    <field name="state"/>
                                    <field name="reason"/>
                                </tree>
                            </field>
                        </group>
                    </group>
                </sheet>
            </form>
        </field>
    </record>
</odoo>
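For orientation, a hedged sketch of the persistent batch model this new form presumes, reconstructed from the field names used in the view; the real runbot_merge.batch definition may differ in types and details.

    from odoo import fields, models

    class Batch(models.Model):
        _name = 'runbot_merge.batch'
        _description = "horizontal set of PRs which is kept across stagings"

        name = fields.Char()
        target = fields.Many2one('runbot_merge.branch')
        merge_date = fields.Datetime()
        priority = fields.Selection([('default', 'Default')])  # actual options unknown
        skipchecks = fields.Boolean()
        cancel_staging = fields.Boolean()
        fw_policy = fields.Selection([('default', 'Default')])  # actual options unknown
        blocked = fields.Char()
        all_prs = fields.One2many('runbot_merge.pull_requests', 'batch_id')
        genealogy_ids = fields.Many2many('runbot_merge.batch')   # linked batches across forward-ports
        staging_ids = fields.Many2many('runbot_merge.stagings')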
@ -123,51 +123,138 @@
        <field name="model">runbot_merge.pull_requests</field>
        <field name="arch" type="xml">
            <form>
                <header/>
                <div class="o_form_statusbar">
                    <span class="o_statusbar_buttons">
                        <field name="github_url" widget="url" class="btn btn-secondary" text="Github"/>
                        <field name="url" widget="url" class="btn btn-secondary" text="Frontend"/></span>
                </div>
                <sheet>
                    <div class="oe_title">
                        <h1>
                            <field name="repository"/>#<field name="number"/>
                        </h1>
                        <h2>
                            <field name="state"/>
                            <span attrs="{'invisible': ['|', ('state', '=', 'merged'), ('blocked', '=', False)]}">
                                (blocked: <field name="blocked"/>)
                            </span>
                            <span attrs="{'invisible': [('state', '!=', 'merged')]}">
                                (<field name="merge_date"/>)
                            </span>
                        </h2>
                    </div>
                    <group>
                        <!-- main PR metadata -->
                        <group name="metadata">
                            <group>
                                <field name="batch_id"/>
                                <field name="target"/>
                                <field name="state"/>
                                <field name="author"/>
                            </group>
                            <group>
                                <field name="label"/>
                                <field name="priority"/>
                                <field name="squash"/>
                            </group>
                        </group>
                        <group>
                            <group colspan="4">
                                <field name="author"/>
                                <field name="head"/>
                                <field name="statuses"/>
                            </group>
                            <group colspan="4">
                                <field name="overrides"/>
                            </group>
                        </group>
                        <group>
                            <group colspan="4" string="Message">
                    <notebook>
                        <page name="state" string="State">
                            <group>
                                <group>
                                    <field name="reviewed_by"/>
                                    <field name="closed"/>
                                    <field name="error"/>
                                </group>
                                <group>
                                    <field name="status"/>
                                    <details colspan="4">
                                        <summary>Commit Statuses</summary>

                                        <field name="statuses"/>
                                    </details>
                                    <details colspan="4">
                                        <summary>Overrides</summary>
                                        <field name="overrides"/>
                                    </details>
                                </group>
                            </group>
                            <group>
                                <group colspan="4">
                                    <field name="blocked"/>
                                </group>
                            </group>
                        </page>
                        <page name="configuration" string="Configuration">
                            <group>
                                <group>
                                    <field name="merge_method"/>
                                    <field name="squash"/>
                                    <field name="draft"/>
                                    <field name="limit_id"/>
                                </group>
                                <group>
                                    <field name="priority"/>
                                    <field name="skipchecks" widget="boolean_toggle"/>
                                    <field name="cancel_staging" widget="boolean_toggle"/>
                                </group>
                            </group>
                            <group string="Delegates">
                                <group colspan="4">
                                    <field name="delegates" nolabel="1">
                                        <tree>
                                            <button type="object" name="get_formview_action" icon="fa-external-link"/>
                                            <field name="name"/>
                                            <field name="github_login"/>
                                        </tree>
                                    </field>
                                </group>
                            </group>
                        </page>
                        <page name="stagings" string="Staging History">
                            <group>
                                <group colspan="4">
                                    <field name="staging_ids" nolabel="1" readonly="1">
                                        <tree>
                                            <button type="object" name="get_formview_action" icon="fa-external-link"/>
                                            <field name="staged_at"/>
                                            <field name="state"/>
                                            <field name="reason"/>
                                        </tree>
                                    </field>
                                </group>
                            </group>
                        </page>
                        <page name="porting" string="Forward-Porting">
                            <group>
                                <group>
                                    <field string="Original PR" name="source_id"/>
                                    <field name="parent_id"/>
                                    <field
                                        attrs="{'invisible': [('parent_id', '=', False), ('source_id', '!=', False)]}"
                                        string="Detached because" name="detach_reason" readonly="1"/>
                                </group>
                            </group>
                            <group>
                                <group colspan="4">
                                    <field name="forwardport_ids" nolabel="1" readonly="True">
                                        <tree>
                                            <button type="object" name="get_formview_action" icon="fa-external-link"/>
                                            <field name="target" string="Branch"/>
                                            <field name="number"/>
                                        </tree>
                                    </field>
                                </group>
                            </group>
                        </page>
                    </notebook>
                    <!-- influencers -->
                    <group string="Message">
                        <group colspan="4">
                            <field name="message" nolabel="1"/>
                        </group>
                    </group>
                    <group>
                        <group colspan="4" string="Delegates">
                            <field name="delegates" nolabel="1">
                                <tree>
                                    <button type="object" name="get_formview_action" icon="fa-external-link"/>
                                    <field name="name"/>
                                    <field name="github_login"/>
                                </tree>
                            </field>
                        </group>
                    </group>
                </sheet>
                <div class="oe_chatter">
                    <field name="message_follower_ids" widget="mail_followers"/>
                    <field name="message_ids" widget="mail_thread"/>
                </div>
            </form>
        </field>
    </record>
@ -244,6 +331,15 @@
            </field>
        </group>
    </group>
    <group string="Batches">
        <field name="batch_ids" colspan="4" nolabel="1" readonly="1">
            <tree>
                <button type="object" name="get_formview_action" icon="fa-external-link"/>
                <field name="name"/>
                <field name="prs" widget="many2many_tags"/>
            </tree>
        </field>
    </group>
    <group string="PRs">
        <field name="pr_ids" colspan="4" nolabel="1" readonly="1">
            <tree>
@ -254,14 +350,6 @@
            </tree>
        </field>
    </group>
    <group string="Batches">
        <field name="batch_ids" colspan="4" nolabel="1" readonly="1">
            <tree>
                <button type="object" name="get_formview_action" icon="fa-external-link"/>
                <field name="prs" widget="many2many_tags"/>
            </tree>
        </field>
    </group>
    </sheet>
</form>
</field>
@ -32,6 +32,8 @@
        <field name="secret"/>
    </group>
    <group>
        <field name="staging_enabled" widget="boolean_toggle"/>
        <field name="staging_priority"/>
        <field name="uniquifier"/>
        <field name="ci_timeout"/>
        <field name="batch_limit"/>
@ -331,7 +331,7 @@
    <template id="view_pull_request_info_error">
        <div class="alert alert-danger">
            Error:
            <span t-esc="pr.with_context(active_test=False).batch_ids[-1:].staging_id.reason">
            <span t-esc="pr.with_context(active_test=False).batch_id.staging_ids[-1:].reason">
                Unable to stage PR
            </span>
        </div>
@ -412,17 +412,178 @@
            <t t-else="">open</t>
        </t>
        <t t-call="runbot_merge.view_pull_request_info_{{tmpl.strip()}}"/>
        <t t-set="target_cls" t-value="None if pr.target.active else 'text-muted bg-warning'"/>
        <dl class="runbot-merge-fields">
            <dt>label</dt>
            <dd><span t-field="pr.label"/></dd>
            <dt>head</dt>
            <dd><a t-attf-href="{{pr.github_url}}/commits/{{pr.head}}"><span t-field="pr.head"/></a></dd>
            <dt t-att-class="target_cls">target</dt>
            <dd t-att-class="target_cls"><span t-field="pr.target"/></dd>
        </dl>
        <t t-call="runbot_merge.dashboard-table"/>
        <p t-field="pr.message"/>
    </div></div>
    </t>
</template>
<record id="dashboard-pre" model="ir.actions.server">
|
||||
<field name="name">Preparation for the preparation of the PR dashboard content</field>
|
||||
<field name="state">code</field>
|
||||
<field name="model_id" ref="base.model_ir_qweb"/>
|
||||
<field name="code"><![CDATA[
|
||||
project = pr.repository.project_id
|
||||
genealogy = pr.batch_id.genealogy_ids
|
||||
repos = project.repo_ids & genealogy.all_prs.repository
|
||||
targets = genealogy.all_prs.target
|
||||
if not genealogy:
|
||||
# if a PR is closed, it may not have a batch to get a genealogy from,
|
||||
# in which case it's just a sole soul drifting in the deep dark
|
||||
branches = pr.target
|
||||
repos = pr.repository
|
||||
elif all(p.state in ('merged', 'closed') for p in genealogy[-1].all_prs):
|
||||
branches = (project.branch_ids & targets)[::-1]
|
||||
else:
|
||||
# if the tip of the genealogy is not closed, extend to the furthest limit,
|
||||
# keeping branches which are active or have an associated batch / PR
|
||||
limit = genealogy.prs.limit_id.sorted(lambda b: (b.sequence, b.name))
|
||||
limit_high = project.branch_ids.ids.index(limit.id) if limit else None
|
||||
limit = targets.sorted(lambda b: (b.sequence, b.name))[-1]
|
||||
limit_low = project.branch_ids.ids.index(limit.id)
|
||||
branches = project.branch_ids[limit_high:limit_low+1].filtered(lambda b: b.active or b in targets)[::-1]
|
||||
|
||||
action = (project, repos, branches, genealogy)
|
||||
]]></field>
|
||||
</record>
|
||||
|
||||
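The windowing in the `else` branch above, restated with plain lists standing in for the branch recordsets: the window runs from the genealogy's forward-port limit down to its furthest target, then is reversed so the dashboard shows the oldest branch first.

    branch_names = ['master', '17.0', '16.0', '15.0']  # project.branch_ids order
    limit_high = branch_names.index('master')  # index of the fw limit
    limit_low = branch_names.index('16.0')     # index of the furthest target
    window = branch_names[limit_high:limit_low + 1][::-1]
    assert window == ['16.0', '17.0', 'master']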
<record id="dashboard-prep" model="ir.actions.server">
|
||||
<field name="name">Preparation of the PR dashboard content</field>
|
||||
<field name="state">code</field>
|
||||
<field name="model_id" ref="base.model_ir_qweb"/>
|
||||
<field name="code"><![CDATA[
|
||||
batches = {}
|
||||
for branch in branches:
|
||||
# FIXME: batches with inconsistent targets?
|
||||
if genealogy:
|
||||
prs_batch = genealogy.filtered(lambda b: b.target == branch).all_prs
|
||||
else:
|
||||
prs_batch = pr
|
||||
for repo in repos:
|
||||
prs = prs_batch.filtered(lambda p: p.repository == repo)
|
||||
st = 0
|
||||
detached = False
|
||||
pr_fmt = []
|
||||
for p in prs:
|
||||
st |= (bool(p.error) << 2 | (p.state == 'merged') << 1 | bool(p.blocked) << 0)
|
||||
|
||||
done = p.state in ('closed', 'merged')
|
||||
# this will hide the detachment signal when the PRs are merged/closed, cleaner but less correct?
|
||||
detached = detached or bool(p.source_id and not p.parent_id and not done)
|
||||
label = p.state
|
||||
if p.blocked:
|
||||
label = "%s, %s" % (label, p.blocked)
|
||||
pr_fmt.append({
|
||||
'pr': p,
|
||||
'number': p.number,
|
||||
'label': label,
|
||||
'closed': p.closed,
|
||||
'backend_url': "/web#view_type=form&model=runbot_merge.pull_requests&id=%d" % p.id,
|
||||
'github_url': p.github_url,
|
||||
'checked': done or p.status == 'success',
|
||||
'reviewed': done or bool(p.reviewed_by),
|
||||
'attached': done or p.parent_id or not p.source_id,
|
||||
})
|
||||
state = None
|
||||
for i, s in zip(range(2, -1, -1), ['danger', 'success', 'warning']):
|
||||
if st & (1 << i):
|
||||
state = s
|
||||
break
|
||||
|
||||
batches[repo, branch] = {
|
||||
'active': pr in prs,
|
||||
'detached': detached,
|
||||
'state': state,
|
||||
'prs': pr_fmt,
|
||||
'pr_ids': prs,
|
||||
}
|
||||
|
||||
action = batches
|
||||
]]></field>
|
||||
</record>
|
||||
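The status rollup above in isolation: each PR contributes an error / merged / blocked bit, and the decode loop picks the highest-priority bootstrap class, error ("danger") winning over merged ("success") over blocked ("warning"). A standalone restatement:

    def rollup(prs):
        st = 0
        for error, merged, blocked in prs:
            st |= error << 2 | merged << 1 | blocked << 0
        for bit, cls in zip(range(2, -1, -1), ['danger', 'success', 'warning']):
            if st & (1 << bit):
                return cls
        return None

    assert rollup([(0, 1, 0), (0, 0, 1)]) == 'success'  # merged beats blocked
    assert rollup([(1, 0, 0), (0, 1, 0)]) == 'danger'   # any error dominates
    assert rollup([(0, 0, 1)]) == 'warning'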
<template id="dashboard-table">
|
||||
<t t-set="pre" t-value="pr.env.ref('runbot_merge.dashboard-pre').sudo()._run_action_code_multi({'pr': pr})"/>
|
||||
<t t-set="repos" t-value="pre[1]"/>
|
||||
<t t-set="branches" t-value="pre[2]"/>
|
||||
<t t-set="batches" t-value="env.ref('runbot_merge.dashboard-prep').sudo()._run_action_code_multi({
|
||||
'pr': pr,
|
||||
'repos': repos,
|
||||
'branches': branches,
|
||||
'genealogy': pre[3],
|
||||
})"/>
|
||||
<table class="table table-bordered table-sm">
|
||||
<colgroup>
|
||||
<col/>
|
||||
<col t-foreach="repos" t-as="repo"
|
||||
t-att-class="'bg-info' if repo == pr.repository else None"
|
||||
/>
|
||||
</colgroup>
|
||||
<thead>
|
||||
<tr>
|
||||
<th/>
|
||||
<th t-foreach="repos" t-as="repo">
|
||||
<t t-out="repo.name"/>
|
||||
</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<!--
|
||||
table-info looks like shit (possibly because no odoo styling so use bg-info
|
||||
text-muted doesn't do anything, so set some opacity
|
||||
-->
|
||||
<tr t-foreach="branches" t-as="branch"
|
||||
t-att-title="None if branch.active else 'branch is disabled'"
|
||||
t-attf-class="{{
|
||||
'bg-info' if branch == pr.target else ''
|
||||
}} {{
|
||||
'inactive' if not branch.active else ''
|
||||
}}">
|
||||
<td t-out="branch.name"/>
|
||||
<t t-foreach="repos" t-as="repo">
|
||||
<t t-set="ps" t-value="batches[repo, branch]"/>
|
||||
<t t-set="stateclass" t-value="ps['state'] and 'table-'+ps['state']"/>
|
||||
<t t-set="detached" t-value="ps['detached']"/>
|
||||
<td t-if="ps['prs']"
|
||||
t-att-title="'detached' if detached else None"
|
||||
t-attf-class="{{
|
||||
'table-active' if ps['active'] else ''
|
||||
}} {{
|
||||
'detached' if detached else ''
|
||||
}}{{stateclass}}">
|
||||
<!--
|
||||
there should be only one PR per (repo, target) but
|
||||
that's not always the case
|
||||
-->
|
||||
<span t-foreach="ps['prs']" t-as="p"
|
||||
t-att-title="p['label']"
|
||||
t-att-class="'closed' if p['closed'] else None">
|
||||
<a t-attf-href="/{{repo.name}}/pull/{{p['number']}}">#<t t-out="p['number']"/></a>
|
||||
<a t-attf-class="fa fa-brands fa-github"
|
||||
title="Open on Github"
|
||||
t-att-href="p['github_url']"
|
||||
/>
|
||||
<a groups="base.group_user"
|
||||
title="Open in Backend"
|
||||
t-attf-class="fa fa-external-link"
|
||||
t-att-href="p['backend_url']"
|
||||
/>
|
||||
<sup t-if="not p['checked']" class="text-danger">unchecked</sup>
|
||||
<sup t-if="not p['reviewed']" class="text-danger">unreviewed</sup>
|
||||
<sup t-if="not p['attached']"
|
||||
t-attf-title="detached: {{p['pr'].detach_reason}}"
|
||||
class="text-warning fa fa-unlink"/>
|
||||
</span>
|
||||
</td>
|
||||
<td t-else=""/>
|
||||
</t>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</template>
|
||||
</odoo>
|
||||
|