Mirror of https://github.com/odoo/runbot.git, synced 2025-03-15 15:35:46 +07:00
[MERGE] runbot_merge, forwardport: latest updates
Got a bunch of updates since the initial attempt to migrate the mergebot before the odoo days.
commit 0a17454838

conftest.py (77 lines changed)
@@ -79,6 +79,18 @@ import requests
 NGROK_CLI = [
     'ngrok', 'start', '--none', '--region', 'eu',
 ]
+# When an operation can trigger webhooks, the test suite has to wait *some time*
+# in the hope that the webhook(s) will have been dispatched by the end as github
+# provides no real webhook feedback (e.g. an event stream).
+#
+# This also acts as a bit of a rate limiter, as github has gotten more and more
+# angry with that. Especially around event-generating limits.
+#
+# TODO: explore https://docs.github.com/en/rest/using-the-rest-api/rate-limits-for-the-rest-api
+# and see if it would be possible to be a better citizen (e.g. add test
+# retry / backoff using GH tighter GH integration)
+WEBHOOK_WAIT_TIME = 10  # seconds
+LOCAL_WEBHOOK_WAIT_TIME = 1
 
 def pytest_addoption(parser):
     parser.addoption('--addons-path')
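A note on the TODO above: GitHub's documented `GET /rate_limit` endpoint reports the remaining quota without consuming it, so the fixed sleeps could in principle become a poll. A minimal sketch of that idea, not part of this commit (the `margin` parameter is invented for illustration):

    import time
    import requests

    def wait_for_rate_limit(session: requests.Session, margin: int = 50) -> None:
        # GET /rate_limit is documented and does not count against the core quota
        r = session.get('https://api.github.com/rate_limit')
        r.raise_for_status()
        core = r.json()['resources']['core']
        if core['remaining'] < margin:
            # sleep until the quota window resets instead of a fixed interval
            time.sleep(max(0.0, core['reset'] - time.time()))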
@@ -99,7 +111,12 @@ def pytest_addoption(parser):
 def is_manager(config):
     return not hasattr(config, 'workerinput')
 
-def pytest_configure(config):
+def pytest_configure(config: pytest.Config) -> None:
+    global WEBHOOK_WAIT_TIME
+    # no tunnel => local setup, no need to wait as much
+    if not config.getoption('--tunnel'):
+        WEBHOOK_WAIT_TIME = LOCAL_WEBHOOK_WAIT_TIME
+
     sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'mergebot_test_utils'))
     config.addinivalue_line(
         "markers",
@@ -126,7 +143,7 @@ def _set_socket_timeout():
     socket.setdefaulttimeout(120.0)
 
 @pytest.fixture(scope="session")
-def config(pytestconfig):
+def config(pytestconfig: pytest.Config) -> dict[str, dict[str, str]]:
     """ Flat version of the pytest config file (pytest.ini), parses to a
     simple dict of {section: {key: value}}
 
@@ -211,7 +228,7 @@ def users(partners, rolemap):
     return {k: v['login'] for k, v in rolemap.items()}
 
 @pytest.fixture(scope='session')
-def tunnel(pytestconfig, port):
+def tunnel(pytestconfig: pytest.Config, port: int):
     """ Creates a tunnel to localhost:<port> using ngrok or localtunnel, should yield the
     publicly routable address & terminate the process at the end of the session
     """
@@ -368,8 +385,8 @@ def db(request, module, dbcache, tmpdir):
     if not request.config.getoption('--no-delete'):
         subprocess.run(['dropdb', rundb], check=True)
 
-def wait_for_hook(n=1):
-    time.sleep(10 * n)
+def wait_for_hook():
+    time.sleep(WEBHOOK_WAIT_TIME)
 
 def wait_for_server(db, port, proc, mod, timeout=120):
     """ Polls for server to be response & have installed our module.
@@ -437,6 +454,7 @@ class Base(models.AbstractModel):
     _inherit = 'base'
 
     def run_crons(self):
+        builtins.current_date = self.env.context.get('current_date')
         builtins.forwardport_merged_before = self.env.context.get('forwardport_merged_before')
         builtins.forwardport_updated_before = self.env.context.get('forwardport_updated_before')
         self.env['ir.cron']._process_jobs(self.env.cr.dbname)
@@ -477,6 +495,7 @@ class IrCron(models.Model):
     (mod / '__manifest__.py').write_text(pprint.pformat({
         'name': 'dummy saas_worker',
         'version': '1.0',
+        'license': 'BSD-0-Clause',
     }), encoding='utf-8')
     (mod / 'util.py').write_text("""\
 def from_role(*_, **__):
@@ -777,19 +796,20 @@ class Repo:
             parents=[p['sha'] for p in gh_commit['parents']],
         )
 
-    def read_tree(self, commit):
+    def read_tree(self, commit: Commit, *, recursive=False) -> dict[str, str]:
         """ read tree object from commit
-
-        :param Commit commit:
-        :rtype: Dict[str, str]
         """
-        r = self._session.get('https://api.github.com/repos/{}/git/trees/{}'.format(self.name, commit.tree))
+        r = self._session.get(
+            'https://api.github.com/repos/{}/git/trees/{}'.format(self.name, commit.tree),
+            params={'recursive': '1'} if recursive else None,
+        )
         assert 200 <= r.status_code < 300, r.text
 
         # read tree's blobs
         tree = {}
         for t in r.json()['tree']:
-            assert t['type'] == 'blob', "we're *not* doing recursive trees in test cases"
+            if t['type'] != 'blob':
+                continue  # only keep blobs
             r = self._session.get('https://api.github.com/repos/{}/git/blobs/{}'.format(self.name, t['sha']))
             assert 200 <= r.status_code < 300, r.text
             tree[t['path']] = base64.b64decode(r.json()['content']).decode()
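For context on the new `recursive` flag: passing `recursive=1` to GitHub's git/trees endpoint flattens the whole tree into slash-separated paths, and the listing then contains `tree` entries for directories alongside `blob` entries, which is why the hard assert above had to become a skip. A standalone sketch against the documented endpoint (repo name and sha are placeholders):

    import requests

    session = requests.Session()
    r = session.get(
        'https://api.github.com/repos/{}/git/trees/{}'.format('some/repo', 'a' * 40),  # placeholders
        params={'recursive': '1'},
    )
    r.raise_for_status()
    for entry in r.json()['tree']:
        # 'blob' entries are files, 'tree' entries are directories
        print(entry['type'], entry['path'], entry['sha'])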
@@ -813,6 +833,11 @@ class Repo:
         r = self._session.patch('https://api.github.com/repos/{}/git/refs/{}'.format(self.name, name), json={'sha': commit, 'force': force})
         assert r.ok, r.text
 
+    def delete_ref(self, name):
+        assert self.hook
+        r = self._session.delete(f'https://api.github.com/repos/{self.name}/git/refs/{name}')
+        assert r.ok, r.text
+
     def protect(self, branch):
         assert self.hook
         r = self._session.put('https://api.github.com/repos/{}/branches/{}/protection'.format(self.name, branch), json={
@@ -996,11 +1021,11 @@ class Repo:
             return
 
     def __enter__(self):
-        self.hook = 1
+        self.hook = True
         return self
     def __exit__(self, *args):
-        wait_for_hook(self.hook)
-        self.hook = 0
+        wait_for_hook()
+        self.hook = False
 class Commit:
     def __init__(self, message, *, author=None, committer=None, tree, reset=False):
         self.id = None
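Previously `hook` doubled as a sleep multiplier (`wait_for_hook(self.hook)` slept `10 * n` seconds); with the wait factored into `WEBHOOK_WAIT_TIME` it can be a plain boolean. The pattern reduced to a self-contained skeleton, as a sketch:

    import time

    WEBHOOK_WAIT_TIME = 10  # mirrors the conftest constant

    class Repo:
        hook = False

        def __enter__(self):
            # mutations performed inside the block are expected to fire webhooks
            self.hook = True
            return self

        def __exit__(self, *args):
            # give github time to dispatch the webhooks the block generated
            time.sleep(WEBHOOK_WAIT_TIME)
            self.hook = False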
@@ -1134,7 +1159,6 @@ class PR:
             headers=headers
         )
         assert 200 <= r.status_code < 300, r.text
-        wait_for_hook()
 
     def delete_comment(self, cid, token=None):
         assert self.repo.hook
@@ -1247,13 +1271,27 @@ class LabelsProxy(collections.abc.MutableSet):
 
 class Environment:
     def __init__(self, port, db):
-        self._uid = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/common'.format(port)).authenticate(db, 'admin', 'admin', {})
-        self._object = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/object'.format(port))
+        self._port = port
         self._db = db
+        self._uid = None
+        self._password = None
+        self._object = xmlrpc.client.ServerProxy(f'http://localhost:{port}/xmlrpc/2/object')
+        self.login('admin', 'admin')
+
+    def with_user(self, login, password):
+        env = copy.copy(self)
+        env.login(login, password)
+        return env
+
+    def login(self, login, password):
+        self._password = password
+        self._uid = xmlrpc.client.ServerProxy(
+            f'http://localhost:{self._port}/xmlrpc/2/common'
+        ).authenticate(self._db, login, password, {})
 
     def __call__(self, model, method, *args, **kwargs):
         return self._object.execute_kw(
-            self._db, self._uid, 'admin',
+            self._db, self._uid, self._password,
             model, method,
             args, kwargs
         )
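`Environment` drives Odoo through the standard external XML-RPC API: `/xmlrpc/2/common` for `authenticate` and `/xmlrpc/2/object` for `execute_kw`, which is why `login` can simply re-authenticate and stash the uid/password pair for later calls. The protocol in isolation (host, database and credentials are placeholders):

    import xmlrpc.client

    url, db = 'http://localhost:8069', 'testdb'  # placeholders
    common = xmlrpc.client.ServerProxy(f'{url}/xmlrpc/2/common')
    uid = common.authenticate(db, 'admin', 'admin', {})  # falsy if the credentials are bad

    models = xmlrpc.client.ServerProxy(f'{url}/xmlrpc/2/object')
    # execute_kw(db, uid, password, model, method, positional_args, keyword_args)
    ids = models.execute_kw(db, uid, 'admin', 'res.partner', 'search',
                            [[('github_login', '!=', False)]], {'limit': 5})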
@@ -1409,6 +1447,9 @@ class Model:
         )
 
     def mapped(self, path):
+        if callable(path):
+            return [path(r) for r in self]
+
         field, *rest = path.split('.', 1)
         descr = self._fields[field]
         if descr['type'] in ('many2one', 'one2many', 'many2many'):
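The `callable` branch mirrors the server-side `BaseModel.mapped`, which also accepts a function and applies it to every record instead of walking a dotted field path. A simplified, self-contained stand-in showing both behaviours:

    def mapped(records, path_or_fn):
        # simplified stand-in for Model.mapped: callables are applied directly
        if callable(path_or_fn):
            return [path_or_fn(r) for r in records]
        return [r[path_or_fn] for r in records]

    records = [{'id': 1, 'state': 'open'}, {'id': 2, 'state': 'merged'}]
    assert mapped(records, lambda r: r['id']) == [1, 2]
    assert mapped(records, 'state') == ['open', 'merged']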
@@ -11,6 +11,7 @@
         <tree>
             <field name="source"/>
             <field name="batch_id"/>
+            <field name="retry_after_relative" string="Retry In"/>
         </tree>
     </field>
 </record>
@@ -20,8 +21,13 @@
     <field name="arch" type="xml">
         <form>
             <group>
-                <group><field name="source"/></group>
-                <group><field name="batch_id"/></group>
+                <group>
+                    <field name="source"/>
+                    <field name="batch_id"/>
+                </group>
+                <group>
+                    <field name="retry_after"/>
+                </group>
             </group>
         </form>
     </field>
@@ -7,9 +7,10 @@ from datetime import datetime, timedelta
 
 import requests
 import sentry_sdk
+from babel.dates import format_timedelta
 from dateutil import relativedelta
 
-from odoo import fields, models
+from odoo import api, fields, models
 from odoo.addons.runbot_merge import git
 from odoo.addons.runbot_merge.github import GH
 
@@ -73,8 +74,10 @@ class ForwardPortTasks(models.Model, Queue):
         ('complete', 'Complete ported batches'),
     ], required=True)
     retry_after = fields.Datetime(required=True, default='1900-01-01 01:01:01')
+    retry_after_relative = fields.Char(compute="_compute_retry_after_relative")
     pr_id = fields.Many2one('runbot_merge.pull_requests')
 
+    @api.model_create_multi
     def create(self, vals_list):
         self.env.ref('forwardport.port_forward')._trigger()
         return super().create(vals_list)
@@ -90,6 +93,15 @@ class ForwardPortTasks(models.Model, Queue):
             ('retry_after', '<=', fields.Datetime.to_string(fields.Datetime.now())),
         ]
 
+    @api.depends('retry_after')
+    def _compute_retry_after_relative(self):
+        now = fields.Datetime.now()
+        for t in self:
+            if t.retry_after <= now:
+                t.retry_after_relative = ""
+            else:
+                t.retry_after_relative = format_timedelta(t.retry_after - now, locale=t.env.lang)
+
     def _on_failure(self):
         super()._on_failure()
         self.retry_after = fields.Datetime.to_string(fields.Datetime.now() + timedelta(minutes=30))
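`format_timedelta` is what turns the raw interval into the human-readable "Retry In" column added to the tree view earlier; it rounds to the largest sensible unit. For instance (locale pinned for illustration):

    from datetime import timedelta
    from babel.dates import format_timedelta

    format_timedelta(timedelta(minutes=30), locale='en_US')  # '30 minutes'
    format_timedelta(timedelta(hours=27), locale='en_US')    # '1 day'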
@@ -201,7 +213,7 @@ class ForwardPortTasks(models.Model, Queue):
         # NOTE: ports the new source everywhere instead of porting each
         #       PR to the next step as it does not *stop* on conflict
         repo = git.get_local(source.repository)
-        conflict, head = source._create_fp_branch(repo, target)
+        conflict, head = source._create_port_branch(repo, target, forward=True)
         repo.push(git.fw_url(pr.repository), f'{head}:refs/heads/{ref}')
 
         remote_target = repository.fp_remote_target
@@ -268,6 +280,7 @@ class UpdateQueue(models.Model, Queue):
     original_root = fields.Many2one('runbot_merge.pull_requests')
     new_root = fields.Many2one('runbot_merge.pull_requests')
 
+    @api.model_create_multi
     def create(self, vals_list):
         self.env.ref('forwardport.updates')._trigger()
         return super().create(vals_list)
@@ -300,7 +313,7 @@ class UpdateQueue(models.Model, Queue):
                 return
 
             repo = git.get_local(previous.repository)
-            conflicts, new_head = previous._create_fp_branch(repo, child.target)
+            conflicts, new_head = previous._create_port_branch(repo, child.target, forward=True)
 
             if conflicts:
                 _, out, err, _ = conflicts
@@ -357,6 +370,7 @@ class DeleteBranches(models.Model, Queue):
 
     pr_id = fields.Many2one('runbot_merge.pull_requests')
 
+    @api.model_create_multi
     def create(self, vals_list):
         self.env.ref('forwardport.remover')._trigger(datetime.now() - MERGE_AGE)
         return super().create(vals_list)
@@ -34,6 +34,8 @@ from odoo.addons.runbot_merge import git, utils
 from odoo.addons.runbot_merge.models.pull_requests import Branch
 from odoo.addons.runbot_merge.models.stagings_create import Message
 
+Conflict = tuple[str, str, str, list[str]]
+
 DEFAULT_DELTA = dateutil.relativedelta.relativedelta(days=3)
 
 _logger = logging.getLogger('odoo.addons.forwardport')
@@ -198,12 +200,13 @@ class PullRequests(models.Model):
             if existing:
                 old |= existing
                 continue
 
             to_create.append(vals)
             if vals.get('parent_id') and 'source_id' not in vals:
                 vals['source_id'] = self.browse(vals['parent_id']).root_id.id
-        new = super().create(to_create)
+        if not to_create:
+            return old
+
+        new = super().create(to_create)
         for pr in new:
             # added a new PR to an already forward-ported batch: port the PR
             if self.env['runbot_merge.batch'].search_count([
@@ -214,6 +217,8 @@ class PullRequests(models.Model):
                 'source': 'complete',
                 'pr_id': pr.id,
             })
+        if not old:
+            return new
 
         new = iter(new)
         old = iter(old)
@@ -297,17 +302,29 @@ class PullRequests(models.Model):
         return sorted(commits, key=lambda c: idx[c['sha']])
 
 
-    def _create_fp_branch(self, source, target_branch):
+    def _create_port_branch(
+            self,
+            source: git.Repo,
+            target_branch: Branch,
+            *,
+            forward: bool,
+    ) -> tuple[typing.Optional[Conflict], str]:
         """ Creates a forward-port for the current PR to ``target_branch`` under
         ``fp_branch_name``.
 
-        :param target_branch: the branch to port forward to
-        :rtype: (None | (str, str, str, list[commit]), Repo)
+        :param source: the git repository to work with / in
+        :param target_branch: the branch to port ``self`` to
+        :param forward: whether this is a forward (``True``) or a back
+                        (``False``) port
+        :returns: a conflict if one happened, and the head of the port branch
+                  (either a succcessful port of the entire `self`, or a conflict
+                  commit)
         """
         logger = _logger.getChild(str(self.id))
         root = self.root_id
         logger.info(
-            "Forward-porting %s (%s) to %s",
+            "%s %s (%s) to %s",
+            "Forward-porting" if forward else "Back-porting",
             self.display_name, root.display_name, target_branch.name
         )
         fetch = source.with_config(stdout=subprocess.PIPE, stderr=subprocess.STDOUT).fetch()
@@ -356,7 +373,7 @@ class PullRequests(models.Model):
             '--merge-base', commits[0]['parents'][0]['sha'],
             target_branch.name,
             root.head,
-        )
+        ).stdout.decode().splitlines(keepends=False)[0]
         # if there was a single commit, reuse its message when committing
         # the conflict
         if len(commits) == 1:
@@ -372,9 +389,28 @@ stderr:
 {err}
 """
 
-            target_head = source.stdout().rev_parse(target_branch.name).stdout.decode().strip()
+            # if a file is modified by the original PR and added by the forward
+            # port / conflict it's a modify/delete conflict: the file was
+            # deleted in the target branch, and the update (modify) in the
+            # original PR causes it to be added back
+            original_modified = set(conf.diff(
+                "--diff-filter=M", "--name-only",
+                "--merge-base", root.target.name,
+                root.head,
+            ).stdout.decode().splitlines(keepends=False))
+            conflict_added = set(conf.diff(
+                "--diff-filter=A", "--name-only",
+                target_branch.name,
+                tree,
+            ).stdout.decode().splitlines(keepends=False))
+            if modify_delete := (conflict_added & original_modified):
+                # rewrite the tree with conflict markers added to modify/deleted blobs
+                tree = conf.modify_delete(tree, modify_delete)
+
+            target_head = source.stdout().rev_parse(f"refs/heads/{target_branch.name}")\
+                .stdout.decode().strip()
             commit = conf.commit_tree(
-                tree=tree.stdout.decode().splitlines(keepends=False)[0],
+                tree=tree,
                 parents=[target_head],
                 message=str(msg),
                 author=author,
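The modify/delete detection leans on `git diff --diff-filter`: `M` restricts the listing to modified paths and `A` to added ones, so intersecting the two sets yields files the original PR modified but which only reach the ported tree as re-additions (i.e. they were deleted in the target). The same plumbing outside the mergebot's `Repo` wrapper, as a sketch with placeholder revisions:

    import subprocess

    def changed_paths(diff_filter: str, *revs: str) -> set[str]:
        # e.g. changed_paths('M', 'base', 'pr-head') -> paths the PR modified
        out = subprocess.run(
            ['git', 'diff', f'--diff-filter={diff_filter}', '--name-only', *revs],
            check=True, stdout=subprocess.PIPE,
        )
        return set(out.stdout.decode().splitlines())

    modify_delete = changed_paths('A', 'target-branch', 'ported-tree') \
                  & changed_paths('M', 'base', 'pr-head')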
@@ -396,7 +432,7 @@ stderr:
         logger = _logger.getChild(str(self.id)).getChild('cherrypick')
 
         # target's head
-        head = repo.stdout().rev_parse(branch).stdout.decode().strip()
+        head = repo.stdout().rev_parse(f"refs/heads/{branch}").stdout.decode().strip()
 
         commits = self.commits()
         logger.info(
@@ -494,6 +530,8 @@ stderr:
             ('source_id', '!=', False),
             # active
             ('state', 'not in', ['merged', 'closed']),
+            # not ready
+            ('blocked', '!=', False),
             ('source_id.merge_date', '<', cutoff),
         ], order='source_id, id'), lambda p: p.source_id)
 
@@ -508,11 +546,8 @@ stderr:
                 continue
             source.reminder_backoff_factor += 1
 
-            # only keep the PRs which don't have an attached descendant)
-            pr_ids = {p.id for p in prs}
-            for pr in prs:
-                pr_ids.discard(pr.parent_id.id)
-            for pr in (p for p in prs if p.id in pr_ids):
+            # only keep the PRs which don't have an attached descendant
+            for pr in set(prs).difference(p.parent_id for p in prs):
                 self.env.ref('runbot_merge.forwardport.reminder')._send(
                     repository=pr.repository,
                     pull_request=pr.number,
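The rewritten loop replaces three lines of id bookkeeping with a set difference: a PR is the tip of its forward-port chain exactly when no other PR in the group points at it through `parent_id`. The idiom in isolation:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass(frozen=True)
    class FakePR:
        name: str
        parent_id: Optional['FakePR'] = None

    a = FakePR('a'); b = FakePR('b', parent_id=a); c = FakePR('c', parent_id=b)
    prs = [a, b, c]
    # a PR is a chain tip iff nothing claims it as parent
    assert set(prs).difference(p.parent_id for p in prs) == {c}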
forwardport/tests/test_backport.py (new file, 117 lines)
@@ -0,0 +1,117 @@
+from xmlrpc.client import Fault
+
+import pytest
+
+from utils import make_basic, Commit, to_pr, seen
+
+
+@pytest.fixture
+def repo(env, config, make_repo):
+    repo, _ = make_basic(env, config, make_repo, statuses="default")
+    return repo
+
+@pytest.fixture
+def pr_id(env, repo, config):
+    with repo:
+        repo.make_commits('c', Commit("c", tree={'x': '1'}), ref='heads/aref')
+        pr = repo.make_pr(target='c', head='aref')
+        repo.post_status('aref', 'success')
+        pr.post_comment('hansen r+', config['role_reviewer']['token'])
+    env.run_crons()
+    with repo:
+        repo.post_status('staging.c', 'success')
+    env.run_crons()
+    pr_id = to_pr(env, pr)
+    assert pr_id.merge_date
+    return pr_id
+
+@pytest.fixture
+def backport_id(env, pr_id):
+    action = pr_id.backport()
+    backport_id = env[action['res_model']].browse([action['res_id']])
+    assert backport_id._name == 'runbot_merge.pull_requests.backport'
+    assert backport_id
+    return backport_id
+
+def test_golden_path(env, repo, config, pr_id, backport_id, users):
+    branch_a, branch_b, _branch_c = env['runbot_merge.branch'].search([], order='name')
+    backport_id.target = branch_a.id
+    act2 = backport_id.action_apply()
+    env.run_crons() # run cron to update labels
+
+    _, bp_id = env['runbot_merge.pull_requests'].search([], order='number')
+    assert bp_id.limit_id == branch_b
+    assert bp_id._name == act2['res_model']
+    assert bp_id.id == act2['res_id']
+    bp_head = repo.commit(bp_id.head)
+    assert repo.read_tree(bp_head) == {
+        'f': 'e',
+        'x': '1',
+    }
+    assert bp_head.message == f"""c
+
+X-original-commit: {pr_id.head}\
+"""
+    assert bp_id.message == f"[Backport] c\n\nBackport of {pr_id.display_name}"
+    assert repo.get_pr(bp_id.number).labels == {"backport"}
+
+    # check that the backport can actually be merged and forward-ports successfully...
+    with repo:
+        repo.post_status(bp_id.head, 'success')
+        repo.get_pr(bp_id.number).post_comment("hansen r+", config['role_reviewer']['token'])
+    env.run_crons()
+    with repo:
+        repo.post_status('staging.a', 'success')
+    env.run_crons()
+    _pr, _backport, fw_id = env['runbot_merge.pull_requests'].search([], order='number')
+    fw_pr = repo.get_pr(fw_id.number)
+    assert fw_pr.comments == [
+        seen(env, fw_pr, users),
+        (users['user'], '''\
+@{user} @{reviewer} this PR targets b and is the last of the forward-port chain.
+
+To merge the full chain, use
+> @hansen r+
+
+More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
+'''.format_map(users)),
+    ]
+
+def test_conflict(env, repo, config, backport_id):
+    with repo:
+        repo.make_commits('a', Commit('conflict', tree={'x': '-1'}), ref='heads/a', make=False)
+
+    branch_a, _branch_b, _branch_c = env['runbot_merge.branch'].search([], order='name')
+    backport_id.target = branch_a.id
+    with pytest.raises(Fault) as exc:
+        backport_id.action_apply()
+    assert exc.value.faultString == """\
+backport conflict:
+
+Auto-merging x
+CONFLICT (add/add): Merge conflict in x
+"""
+
+def test_target_error(env, config, backport_id):
+    branch_a, _branch_b, branch_c = env['runbot_merge.branch'].search([], order='name')
+    with pytest.raises(Fault) as exc:
+        backport_id.action_apply()
+    assert exc.value.faultString == "A backport needs a backport target"
+
+    backport_id.target = branch_c.id
+    with pytest.raises(Fault) as exc:
+        backport_id.action_apply()
+    assert exc.value.faultString == "The backport branch needs to be before the source's branch (got 'c' and 'c')"
+
+    backport_id.target = branch_a.id
+    backport_id.action_apply()
+
+@pytest.mark.skip(
+    reason="Currently no way to make just the PR creation fail, swapping the "
+           "fp_github_token for an invalid one breaks git itself"
+)
+def test_pr_fail(env, config, repo, pr_id, backport_id):
+    backport_id.target = env['runbot_merge.branch'].search([], order='name', limit=1).id
+    with pytest.raises(Fault) as exc:
+        backport_id.action_apply()
+    assert exc.value.faultString == 'Backport PR creation failure: '
@@ -262,7 +262,7 @@ def test_massive_conflict(env, config, make_repo):
 
 
 def test_conflict_deleted(env, config, make_repo):
-    prod, other = make_basic(env, config, make_repo)
+    prod, other = make_basic(env, config, make_repo, statuses="default")
    # remove f from b
     with prod:
         prod.make_commits(
@@ -277,18 +277,12 @@ def test_conflict_deleted(env, config, make_repo):
             ref='heads/conflicting'
         )
         pr = prod.make_pr(target='a', head='conflicting')
-        prod.post_status(p_0, 'success', 'legal/cla')
-        prod.post_status(p_0, 'success', 'ci/runbot')
+        prod.post_status(p_0, 'success')
         pr.post_comment('hansen r+', config['role_reviewer']['token'])
 
     env.run_crons()
     with prod:
-        prod.post_status('staging.a', 'success', 'legal/cla')
-        prod.post_status('staging.a', 'success', 'ci/runbot')
-
-    env.run_crons()
-    # wait a bit for PR webhook... ?
-    time.sleep(5)
+        prod.post_status('staging.a', 'success')
     env.run_crons()
 
     # should have created a new PR
@@ -300,9 +294,14 @@ def test_conflict_deleted(env, config, make_repo):
         'g': 'c',
     }
     assert pr1.state == 'opened'
-    # NOTE: no actual conflict markers because pr1 essentially adds f de-novo
     assert prod.read_tree(prod.commit(pr1.head)) == {
-        'f': 'xxx',
+        'f': matches("""\
+<<<\x3c<<< $$
+||||||| $$
+=======
+xxx
+>>>\x3e>>> $$
+"""),
         'g': 'c',
     }
 
@@ -333,6 +332,89 @@ def test_conflict_deleted(env, config, make_repo):
     }
     assert pr1.state == 'opened', "state should be open still"
 
+
+def test_conflict_deleted_deep(env, config, make_repo):
+    """ Same as the previous one but files are deeper than toplevel, and we only
+    want to see if the conflict post-processing works.
+    """
+    # region: setup
+    prod = make_repo("test")
+    env['runbot_merge.events_sources'].create({'repository': prod.name})
+    with prod:
+        [a, b] = prod.make_commits(
+            None,
+            Commit("a", tree={
+                "foo/bar/baz": "1",
+                "foo/bar/qux": "1",
+                "corge/grault": "1",
+            }),
+            Commit("b", tree={"foo/bar/qux": "2"}, reset=True),
+        )
+        prod.make_ref("heads/a", a)
+        prod.make_ref("heads/b", b)
+
+    project = env['runbot_merge.project'].create({
+        'name': "test",
+        'github_token': config['github']['token'],
+        'github_prefix': 'hansen',
+        'fp_github_token': config['github']['token'],
+        'fp_github_name': 'herbert',
+        'branch_ids': [
+            (0, 0, {'name': 'a', 'sequence': 100}),
+            (0, 0, {'name': 'b', 'sequence': 80}),
+        ],
+        "repo_ids": [
+            (0, 0, {
+                'name': prod.name,
+                'required_statuses': "default",
+                'fp_remote_target': prod.fork().name,
+                'group_id': False,
+            })
+        ]
+    })
+    env['res.partner'].search([
+        ('github_login', '=', config['role_reviewer']['user'])
+    ]).write({
+        'review_rights': [(0, 0, {'repository_id': project.repo_ids.id, 'review': True})]
+    })
+    # endregion
+
+    with prod:
+        prod.make_commits(
+            'a',
+            Commit("c", tree={
+                "foo/bar/baz": "2",
+                "corge/grault": "insert funny number",
+            }),
+            ref="heads/conflicting",
+        )
+        pr = prod.make_pr(target='a', head='conflicting')
+        prod.post_status("conflicting", "success")
+        pr.post_comment("hansen r+", config['role_reviewer']['token'])
+    env.run_crons()
+
+    with prod:
+        prod.post_status('staging.a', 'success')
+    env.run_crons()
+    _, pr1 = env['runbot_merge.pull_requests'].search([], order='number')
+    assert prod.read_tree(prod.commit(pr1.head), recursive=True) == {
+        "foo/bar/qux": "2",
+        "foo/bar/baz": """\
+<<<<<<< HEAD
+||||||| MERGE BASE
+=======
+2
+>>>>>>> FORWARD PORTED
+""",
+        "corge/grault": """\
+<<<<<<< HEAD
+||||||| MERGE BASE
+=======
+insert funny number
+>>>>>>> FORWARD PORTED
+"""
+    }, "the conflicting file should have had conflict markers fixed in"
+
 def test_multiple_commits_same_authorship(env, config, make_repo):
     """ When a PR has multiple commits by the same author and its
     forward-porting triggers a conflict, the resulting (squashed) conflict
@@ -436,13 +518,13 @@ def test_multiple_commits_different_authorship(env, config, make_repo, users, ro
         "author set to the bot but an empty email"
     assert get(c.committer) == (bot, '')
 
-    assert re.match(r'''<<<\x3c<<< HEAD
+    assert prod.read_tree(c)['g'] == matches('''<<<\x3c<<< b
 b
-||||||| parent of [\da-f]{7,}.*
+||||||| $$
 =======
 2
->>>\x3e>>> [\da-f]{7,}.*
-''', prod.read_tree(c)['g'])
+>>>\x3e>>> $$
+''')
 
     # I'd like to fix the conflict so everything is clean and proper *but*
     # github's API apparently rejects creating commits with an empty email.
@@ -459,7 +541,7 @@ b
 
     assert pr2.comments == [
         seen(env, pr2, users),
-        (users['user'], matches('@%s @%s $$CONFLICT' % (users['user'], users['reviewer']))),
+        (users['user'], matches('@%(user)s @%(reviewer)s $$CONFLICT' % users)),
         (users['reviewer'], 'hansen r+'),
         (users['user'], f"@{users['user']} @{users['reviewer']} unable to stage: "
                         "All commits must have author and committer email, "
@@ -1,3 +1,4 @@
+import lxml.html
 import pytest
 
 from utils import seen, Commit, make_basic, to_pr
@@ -76,6 +77,92 @@ def test_ignore(env, config, make_repo, users):
         (users['user'], "Disabled forward-porting."),
     ]
 
+    with prod:
+        pr.post_comment('hansen up to c', config['role_reviewer']['token'])
+    env.run_crons()
+
+    assert env['runbot_merge.pull_requests'].search([]) == pr_id,\
+        "should still not have created a forward port"
+    assert pr.comments[6:] == [
+        (users['reviewer'], 'hansen up to c'),
+        (users['user'], "@%s can not forward-port, policy is 'no' on %s" % (
+            users['reviewer'],
+            pr_id.display_name,
+        ))
+    ]
+
+    assert pr_id.limit_id == env['runbot_merge.branch'].search([('name', '=', 'c')])
+    pr_id.limit_id = False
+
+    with prod:
+        pr.post_comment("hansen fw=default", config['role_reviewer']['token'])
+    env.run_crons()
+
+    assert pr.comments[8:] == [
+        (users['reviewer'], "hansen fw=default"),
+        (users['user'], "Starting forward-port. Waiting for CI to create followup forward-ports.")
+    ]
+
+    assert env['runbot_merge.pull_requests'].search([('source_id', '=', pr_id.id)]),\
+        "should finally have created a forward port"
+
+
+def test_unignore(env, config, make_repo, users, page):
+    env['res.partner'].create({'name': users['other'], 'github_login': users['other'], 'email': 'other@example.org'})
+
+    prod, _ = make_basic(env, config, make_repo, statuses="default")
+    token = config['role_other']['token']
+    fork = prod.fork(token=token)
+    with prod, fork:
+        [c] = fork.make_commits('a', Commit('c', tree={'0': '0'}), ref='heads/mybranch')
+        pr = prod.make_pr(target='a', head=f'{fork.owner}:mybranch', title="title", token=token)
+        prod.post_status(c, 'success')
+    env.run_crons()
+
+    pr_id = to_pr(env, pr)
+    assert pr_id.batch_id.fw_policy == 'default'
+
+    doc = lxml.html.fromstring(page(f'/{prod.name}/pull/{pr.number}'))
+    assert len(doc.cssselect("table > tbody > tr")) == 3, \
+        "the overview table should have as many rows as there are tables"
+
+    with prod:
+        pr.post_comment('hansen fw=no', token)
+    env.run_crons(None)
+    assert pr_id.batch_id.fw_policy == 'no'
+
+    doc = lxml.html.fromstring(page(f'/{prod.name}/pull/{pr.number}'))
+    assert len(doc.cssselect("table > tbody > tr")) == 1, \
+        "if fw=no, there should be just one row"
+
+    with prod:
+        pr.post_comment('hansen fw=default', token)
+    env.run_crons(None)
+    assert pr_id.batch_id.fw_policy == 'default'
+
+    with prod:
+        pr.post_comment('hansen fw=no', token)
+    env.run_crons(None)
+    with prod:
+        pr.post_comment('hansen up to c', token)
+    env.run_crons(None)
+    assert pr_id.batch_id.fw_policy == 'default'
+
+    assert pr.comments == [
+        seen(env, pr, users),
+        (users['other'], "hansen fw=no"),
+        (users['user'], "Disabled forward-porting."),
+        (users['other'], "hansen fw=default"),
+        (users['user'], "Waiting for CI to create followup forward-ports."),
+        (users['other'], "hansen fw=no"),
+        (users['user'], "Disabled forward-porting."),
+        (users['other'], "hansen up to c"),
+        (users['user'], "Forward-porting to 'c'. Re-enabled forward-porting "\
+            "(you should use `fw=default` to re-enable forward "\
+            "porting after disabling)."
+        ),
+    ]
+
 def test_disable(env, config, make_repo, users):
     """ Checks behaviour if the limit target is disabled:
 
@@ -251,7 +251,7 @@ def test_empty(env, config, make_repo, users):
     """ Cherrypick of an already cherrypicked (or separately implemented)
    commit -> conflicting pr.
     """
-    prod, other = make_basic(env, config, make_repo)
+    prod, other = make_basic(env, config, make_repo, statuses="default")
     # merge change to b
     with prod:
         [p_0] = prod.make_commits(
@@ -259,13 +259,11 @@ def test_empty(env, config, make_repo, users):
             ref='heads/early'
         )
         pr0 = prod.make_pr(target='b', head='early')
-        prod.post_status(p_0, 'success', 'legal/cla')
-        prod.post_status(p_0, 'success', 'ci/runbot')
+        prod.post_status(p_0, 'success')
         pr0.post_comment('hansen r+', config['role_reviewer']['token'])
     env.run_crons()
     with prod:
-        prod.post_status('staging.b', 'success', 'legal/cla')
-        prod.post_status('staging.b', 'success', 'ci/runbot')
+        prod.post_status('staging.b', 'success')
 
     # merge same change to a afterwards
     with prod:
@@ -274,15 +272,13 @@ def test_empty(env, config, make_repo, users):
             ref='heads/late'
         )
         pr1 = prod.make_pr(target='a', head='late')
-        prod.post_status(p_1, 'success', 'legal/cla')
-        prod.post_status(p_1, 'success', 'ci/runbot')
+        prod.post_status(p_1, 'success')
         pr1.post_comment('hansen r+', config['role_reviewer']['token'])
     env.run_crons()
     with prod:
-        prod.post_status('staging.a', 'success', 'legal/cla')
-        prod.post_status('staging.a', 'success', 'ci/runbot')
+        prod.post_status('staging.a', 'success')
 
     env.run_crons()
 
     assert prod.read_tree(prod.commit('a')) == {
         'f': 'e',
         'x': '0',
@@ -315,7 +311,8 @@ def test_empty(env, config, make_repo, users):
     }
 
     with prod:
-        validate_all([prod], [fp_id.head, fail_id.head])
+        prod.post_status(fp_id.head, 'success')
+        prod.post_status(fail_id.head, 'success')
     env.run_crons()
 
     # should not have created any new PR
@@ -379,12 +376,28 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
         (users['reviewer'], 'hansen r+'),
         seen(env, pr1, users),
     ]
-    assert fail_pr.comments == [
-        seen(env, fail_pr, users),
-        conflict,
+    assert fail_pr.comments[2:] == [awaiting]*2,\
+        "message should not be triggered on closed PR"
+    with prod:
+        fail_pr.open()
+    with prod:
+        prod.post_status(fail_id.head, 'success')
+    assert fail_id.state == 'validated'
+    env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
+    assert fail_pr.comments[2:] == [awaiting]*3, "check that message triggers again"
+
+    with prod:
+        fail_pr.post_comment('hansen r+', config['role_reviewer']['token'])
+    assert fail_id.state == 'ready'
+    env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
+    assert fail_pr.comments[2:] == [
         awaiting,
         awaiting,
-    ], "each cron run should trigger a new message"
+        awaiting,
+        (users['reviewer'], "hansen r+"),
+    ],"if a PR is ready (unblocked), the reminder should not trigger as "\
+        "we're likely waiting for the mergebot"
 
 def test_partially_empty(env, config, make_repo):
     """ Check what happens when only some commits of the PR are now empty
@@ -616,6 +629,14 @@ def test_disapproval(env, config, make_repo, users):
             "sibling forward ports may have to be unapproved "
             "individually."),
     ]
+    with prod:
+        pr2.post_comment('hansen r-', token=config['role_other']['token'])
+    env.run_crons(None)
+    assert pr2_id.state == 'validated'
+    assert pr2.comments[2:] == [
+        (users['other'], "hansen r+"),
+        (users['other'], "hansen r-"),
+    ], "shouldn't have a warning on r- because it's the only approved PR of the chain"
 
 def test_delegate_fw(env, config, make_repo, users):
     """If a user is delegated *on a forward port* they should be able to approve
@@ -692,6 +713,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
 '''.format_map(users)),
         (users['other'], 'hansen r+')
     ]
+    assert pr2_id.reviewed_by
 
 
 def test_redundant_approval(env, config, make_repo, users):
@@ -3,7 +3,7 @@ from datetime import datetime, timedelta
 
 import pytest
 
-from utils import seen, Commit, to_pr, make_basic
+from utils import seen, Commit, to_pr, make_basic, prevent_unstaging
 
 
 def test_no_token(env, config, make_repo):
@@ -136,6 +136,64 @@ def test_failed_staging(env, config, make_repo):
     assert pr3_id.state == 'ready'
     assert pr3_id.staging_id
 
+def test_fw_retry(env, config, make_repo, users):
+    prod, _ = make_basic(env, config, make_repo, statuses='default')
+    other_token = config['role_other']['token']
+    fork = prod.fork(token=other_token)
+    with prod, fork:
+        fork.make_commits('a', Commit('c', tree={'a': '0'}), ref='heads/abranch')
+        pr1 = prod.make_pr(
+            title="whatever",
+            target='a',
+            head=f'{fork.owner}:abranch',
+            token=other_token,
+        )
+        prod.post_status(pr1.head, 'success')
+        pr1.post_comment('hansen r+', config['role_reviewer']['token'])
+    env.run_crons()
+
+    other_partner = env['res.partner'].search([('github_login', '=', users['other'])])
+    assert len(other_partner) == 1
+    other_partner.email = "foo@example.com"
+
+    with prod:
+        prod.post_status('staging.a', 'success')
+    env.run_crons()
+
+    _pr1_id, pr2_id = env['runbot_merge.pull_requests'].search([], order='number')
+    pr2 = prod.get_pr(pr2_id.number)
+    with prod:
+        prod.post_status(pr2_id.head, 'success')
+        pr2.post_comment('hansen r+', other_token)
+    env.run_crons()
+    assert not pr2_id.blocked
+
+    with prod:
+        prod.post_status('staging.b', 'failure')
+    env.run_crons()
+
+    assert pr2_id.error
+    with prod:
+        pr2.post_comment('hansen r+', other_token)
+    env.run_crons()
+    assert pr2_id.state == 'error'
+    with prod:
+        pr2.post_comment('hansen retry', other_token)
+    env.run_crons()
+    assert pr2_id.state == 'ready'
+
+    assert pr2.comments == [
+        seen(env, pr2, users),
+        (users['user'], "This PR targets b and is part of the forward-port chain. Further PRs will be created up to c.\n\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n"),
+        (users['other'], 'hansen r+'),
+        (users['user'], "@{other} @{reviewer} staging failed: default".format_map(users)),
+
+        (users['other'], 'hansen r+'),
+        (users['user'], "This PR is already reviewed, it's in error, you might want to `retry` it instead (if you have already confirmed the error is not legitimate)."),
+
+        (users['other'], 'hansen retry'),
+    ]
+
 class TestNotAllBranches:
     """ Check that forward-ports don't behave completely insanely when not all
     branches are supported on all repositories.
@@ -862,13 +920,12 @@ def test_missing_magic_ref(env, config, make_repo):
     Emulate this behaviour by updating the PR with a commit which lives in the
     repo but has no ref.
     """
-    prod, _ = make_basic(env, config, make_repo)
+    prod, _ = make_basic(env, config, make_repo, statuses='default')
     a_head = prod.commit('refs/heads/a')
     with prod:
         [c] = prod.make_commits(a_head.id, Commit('x', tree={'x': '0'}), ref='heads/change')
         pr = prod.make_pr(target='a', head='change')
-        prod.post_status(c, 'success', 'legal/cla')
-        prod.post_status(c, 'success', 'ci/runbot')
+        prod.post_status(c, 'success')
         pr.post_comment('hansen r+', config['role_reviewer']['token'])
     env.run_crons()
 
@@ -877,10 +934,10 @@ def test_missing_magic_ref(env, config, make_repo):
     pr_id = to_pr(env, pr)
     assert pr_id.staging_id
 
-    pr_id.head = '0'*40
+    with prevent_unstaging(pr_id.staging_id):
+        pr_id.head = '0'*40
     with prod:
-        prod.post_status('staging.a', 'success', 'legal/cla')
-        prod.post_status('staging.a', 'success', 'ci/runbot')
+        prod.post_status('staging.a', 'success')
     env.run_crons()
 
     assert not pr_id.staging_id
@@ -1,7 +1,9 @@
 # -*- coding: utf-8 -*-
+import contextlib
 import itertools
 import re
 import time
+import typing
 
 from lxml import html
 
@@ -132,7 +134,7 @@ def make_basic(
     prod = make_repo(reponame)
     env['runbot_merge.events_sources'].create({'repository': prod.name})
     with prod:
-        a_0, a_1, a_2, a_3, a_4, = prod.make_commits(
+        _0, _1, a_2, _3, _4, = prod.make_commits(
             None,
             Commit("0", tree={'f': 'a'}),
             Commit("1", tree={'f': 'b'}),
@@ -141,7 +143,7 @@ def make_basic(
             Commit("4", tree={'f': 'e'}),
             ref='heads/a',
         )
-        b_1, b_2 = prod.make_commits(
+        b_1, _2 = prod.make_commits(
             a_2,
             Commit('11', tree={'g': 'a'}),
             Commit('22', tree={'g': 'b'}),
@@ -200,3 +202,30 @@ Signed-off-by: {pr_id.reviewed_by.formatted_email}"""
 def ensure_one(records):
     assert len(records) == 1
     return records
+
+
+@contextlib.contextmanager
+def prevent_unstaging(st) -> None:
+    # hack: `Stagings.cancel` only cancels *active* stagings,
+    # so if we disable the staging, do a thing, then re-enable
+    # then things should work out
+    assert st and st.active, "preventing unstaging of not-a-staging is not useful"
+    st.active = False
+    try:
+        yield
+    finally:
+        st.active = True
+
+
+TYPE_MAPPING = {
+    'boolean': 'integer',
+    'date': 'datetime',
+    'monetary': 'float',
+    'selection': 'char',
+    'many2one': 'char',
+}
+def read_tracking_value(tv) -> tuple[str, typing.Any, typing.Any]:
+    field_id = tv.field_id if 'field_id' in tv else tv.field
+    field_type = field_id.field_type if 'field_type' in field_id else field_id.ttype
+    t = TYPE_MAPPING.get(field_type) or field_type
+    return field_id.name, tv[f"old_value_{t}"], tv[f"new_value_{t}"]
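Usage of the new helper, as in the `test_missing_magic_ref` change elsewhere in this commit: a mutation that would normally cancel the active staging is wrapped so the staging survives it (`pr_id` comes from the test fixtures):

    with prevent_unstaging(pr_id.staging_id):
        pr_id.head = '0' * 40  # would otherwise unstage the PR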
@@ -19,6 +19,7 @@
         'views/templates.xml',
         'models/project_freeze/views.xml',
         'models/staging_cancel/views.xml',
+        'models/backport/views.xml',
     ],
     'assets': {
         'web._assets_primary_variables': [
@@ -2,6 +2,7 @@ import hashlib
 import hmac
 import logging
 import json
+from datetime import datetime, timedelta
 
 import sentry_sdk
 import werkzeug.exceptions
@@ -210,12 +211,8 @@ def handle_pr(env, event):
             updates['target'] = branch.id
             updates['squash'] = pr['commits'] == 1
 
-        # turns out github doesn't bother sending a change key if the body is
-        # changing from empty (None), therefore ignore that entirely, just
-        # generate the message and check if it changed
-        message = utils.make_message(pr)
-        if message != pr_obj.message:
-            updates['message'] = message
+        if 'title' in event['changes'] or 'body' in event['changes']:
+            updates['message'] = utils.make_message(pr)
 
         _logger.info("update: %s = %s (by %s)", pr_obj.display_name, updates, event['sender']['login'])
         if updates:
@@ -286,9 +283,6 @@ def handle_pr(env, event):
             _logger.error("PR %s sync %s -> %s => failure (closed)", pr_obj.display_name, pr_obj.head, pr['head']['sha'])
             return "It's my understanding that closed/merged PRs don't get sync'd"
 
-        if pr_obj.state == 'ready':
-            pr_obj.unstage("updated by %s", event['sender']['login'])
-
         _logger.info(
             "PR %s sync %s -> %s by %s => reset to 'open' and squash=%s",
             pr_obj.display_name,
@@ -297,6 +291,12 @@ def handle_pr(env, event):
             event['sender']['login'],
             pr['commits'] == 1
         )
+        if pr['base']['ref'] != pr_obj.target.name:
+            env['runbot_merge.fetch_job'].create({
+                'repository': pr_obj.repository.id,
+                'number': pr_obj.number,
+                'commits_at': datetime.now() + timedelta(minutes=5),
+            })
 
         pr_obj.write({
             'reviewed_by': False,
@@ -362,7 +362,8 @@ def handle_status(env, event):
         event['context']: {
             'state': event['state'],
             'target_url': event['target_url'],
-            'description': event['description']
+            'description': event['description'],
+            'updated_at': datetime.now().isoformat(timespec='seconds'),
         }
     })
     # create status, or merge update into commit *unless* the update is already
@@ -374,7 +375,7 @@ def handle_status(env, event):
         SET to_check = true,
             statuses = c.statuses::jsonb || EXCLUDED.statuses::jsonb
         WHERE NOT c.statuses::jsonb @> EXCLUDED.statuses::jsonb
-    """, [event['sha'], status_value])
+    """, [event['sha'], status_value], log_exceptions=False)
     env.ref("runbot_merge.process_updated_commits")._trigger()
 
     return 'ok'
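The `WHERE NOT ... @> ...` guard above only performs the update when the incoming statuses are not already contained in the stored ones. A pure-Python mirror of that logic (a sketch for illustration, not the mergebot's own code):

    def merge_statuses(stored: dict, incoming: dict) -> dict | None:
        # items-view comparison is a superset test, like jsonb's @> operator
        if stored.items() >= incoming.items():
            return None  # nothing new: skip the write, don't re-dirty the commit
        return {**stored, **incoming}  # like jsonb ||, incoming keys win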
@@ -431,7 +432,9 @@ def _handle_comment(env, repo, issue, comment, target=None):
     if not repository.project_id._find_commands(comment['body'] or ''):
         return "No commands, ignoring"
 
-    pr = env['runbot_merge.pull_requests']._get_or_schedule(repo, issue, target=target)
+    pr = env['runbot_merge.pull_requests']._get_or_schedule(
+        repo, issue, target=target, commenter=comment['user']['login'],
+    )
     if not pr:
         return "Unknown PR, scheduling fetch"
 
@@ -4,6 +4,7 @@ from __future__ import annotations
 import base64
 import collections
 import colorsys
+import datetime
 import hashlib
 import io
 import json
@@ -160,7 +161,7 @@ def raster_render(pr):
     # last-modified should be in RFC2822 format, which is what
     # email.utils.formatdate does (sadly takes a timestamp but...)
     last_modified = formatdate(max((
-        o.write_date
+        o.write_date or datetime.datetime.min
         for o in chain(
             project,
             repos,
@@ -38,7 +38,7 @@
         <field name="state">code</field>
         <field name="code">model._send()</field>
         <field name="interval_number">10</field>
-        <field name="interval_type">minutes</field>
+        <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
         <field name="doall" eval="False"/>
         <field name="priority">70</field>
@@ -16,7 +16,7 @@ runbot_merge.pr.load.unmanaged,"Branch `{pr[base][ref]}` is not within my remit,
 
     pr: pull request (github object)
     Repository: repository object (???)"
-runbot_merge.pr.load.fetched,"{pr.ping}I didn't know about this PR and had to retrieve its information, you may have to re-approve it as I didn't see previous commands.","Notifies that we did retrieve an unknown PR (either by request or as side effect of an interaction).
+runbot_merge.pr.load.fetched,"{ping}I didn't know about this PR and had to retrieve its information, you may have to re-approve it as I didn't see previous commands.","Notifies that we did retrieve an unknown PR (either by request or as side effect of an interaction).
 
     Pr: pr object we just created"
 runbot_merge.pr.branch.disabled,"{pr.ping}the target branch {pr.target.name!r} has been disabled, you may want to close this PR.","Notifies that the target branch for this PR was deactivated.
@@ -5,4 +5,7 @@ class FastForwardError(Exception):
 class Mismatch(MergeError):
     pass
 class Unmergeable(MergeError):
-    ...
+    pass
+
+class Skip(MergeError):
+    pass
@@ -6,7 +6,9 @@ import pathlib
 import resource
 import stat
 import subprocess
+from operator import methodcaller
 from typing import Optional, TypeVar, Union, Sequence, Tuple, Dict
+from collections.abc import Iterable, Mapping, Callable
 
 from odoo.tools.appdirs import user_cache_dir
 from .github import MergeError, PrCommit
@@ -75,6 +77,7 @@ class Repo:
         config.setdefault('stderr', subprocess.PIPE)
         self._config = config
         self._params = ()
+        self.runner = subprocess.run
 
     def __getattr__(self, name: str) -> 'GitCommand':
         return GitCommand(self, name.replace('_', '-'))
@@ -85,7 +88,7 @@ class Repo:
             + tuple(itertools.chain.from_iterable(('-c', p) for p in self._params + ALWAYS))\
             + args
         try:
-            return subprocess.run(args, preexec_fn=_bypass_limits, **opts)
+            return self.runner(args, preexec_fn=_bypass_limits, **opts)
         except subprocess.CalledProcessError as e:
             stream = e.stderr or e.stdout
             if stream:
@@ -245,6 +248,51 @@ class Repo:
             *itertools.chain.from_iterable(('-p', p) for p in parents),
         )
 
+    def update_tree(self, tree: str, files: Mapping[str, Callable[[Self, str], str]]) -> str:
+        # FIXME: either ignore or process binary files somehow (how does git show conflicts in binary files?)
+        repo = self.stdout().with_config(stderr=None, text=True, check=False, encoding="utf-8")
+        for f, c in files.items():
+            new_contents = c(repo, f)
+            oid = repo \
+                .with_config(input=new_contents) \
+                .hash_object("-w", "--stdin", "--path", f) \
+                .stdout.strip()
+
+            # we need to rewrite every tree from the parent of `f`
+            while f:
+                f, _, local = f.rpartition("/")
+                # tree to update, `{tree}:` works as an alias for tree
+                lstree = repo.ls_tree(f"{tree}:{f}").stdout.splitlines(keepends=False)
+                new_tree = "".join(
+                    # tab before name is critical to the format
+                    f"{mode} {typ} {oid if name == local else sha}\t{name}\n"
+                    for mode, typ, sha, name in map(methodcaller("split", None, 3), lstree)
+                )
+                oid = repo.with_config(input=new_tree, check=True).mktree().stdout.strip()
+            tree = oid
+        return tree
+
+    def modify_delete(self, tree: str, files: Iterable[str]) -> str:
+        """Updates ``files`` in ``tree`` to add conflict markers to show them
+        as being modify/delete-ed, rather than have only the original content.
+
+        This is because having just content in a valid file is easy to miss,
+        causing file resurrections as they get committed rather than re-removed.
+
+        TODO: maybe extract the diff information compared to before they were removed? idk
+        """
+        def rewriter(r: Self, f: str) -> str:
+            contents = r.cat_file("-p", f"{tree}:{f}").stdout
+            return f"""\
+<<<\x3c<<< HEAD
+||||||| MERGE BASE
+=======
+{contents}
+>>>\x3e>>> FORWARD PORTED
+"""
+        return self.update_tree(tree, dict.fromkeys(files, rewriter))
+
+
 def check(p: subprocess.CompletedProcess) -> subprocess.CompletedProcess:
     if not p.returncode:
         return p
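To make the tree-rewriting loop in `update_tree` concrete: `git ls-tree <tree>` prints one `<mode> <type> <oid>\t<name>` entry per line, and `git mktree` consumes that same format back. A standalone sketch of a single rewrite step, using plain `subprocess` rather than the `Repo` wrapper (the function name is illustrative):

    import subprocess

    def swap_tree_entry(tree: str, name: str, new_oid: str) -> str:
        # read the tree's entries, substitute the oid of `name`, write a new tree
        entries = subprocess.run(
            ['git', 'ls-tree', tree],
            check=True, capture_output=True, text=True,
        ).stdout.splitlines(keepends=False)
        rewritten = "".join(
            f"{mode} {typ} {new_oid if entry == name else oid}\t{entry}\n"
            for mode, typ, oid, entry in (line.split(None, 3) for line in entries)
        )
        return subprocess.run(
            ['git', 'mktree'],
            input=rewritten, check=True, capture_output=True, text=True,
        ).stdout.strip()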
@@ -1,11 +1,14 @@
 from . import mail_thread
 from . import ir_actions
+from . import ir_ui_view
 from . import res_partner
 from . import project
 from . import pull_requests
 from . import batch
+from . import patcher
 from . import project_freeze
 from . import stagings_create
 from . import staging_cancel
+from . import backport
 from . import events_sources
 from . import crons
runbot_merge/models/backport/__init__.py (new file, 145 lines)
@@ -0,0 +1,145 @@
+import logging
+import re
+import secrets
+
+import requests
+
+from odoo import models, fields
+from odoo.exceptions import UserError
+
+from ..batch import Batch
+from ..project import Project
+from ..pull_requests import Repository
+from ... import git
+
+_logger = logging.getLogger(__name__)
+
+
+class PullRequest(models.Model):
+    _inherit = 'runbot_merge.pull_requests'
+
+    id: int
+    display_name: str
+    project: Project
+    repository: Repository
+    batch_id: Batch
+
+    def backport(self) -> dict:
+        if len(self) != 1:
+            raise UserError(f"Backporting works one PR at a time, got {len(self)}")
+
+        if len(self.batch_id.prs) > 1:
+            raise UserError("Automatic backport of multi-pr batches is not currently supported")
+
+        if not self.project.fp_github_token:
+            raise UserError(f"Can not backport {self.display_name}: no token on project {self.project.display_name}")
+
+        if not self.repository.fp_remote_target:
+            raise UserError(f"Can not backport {self.display_name}: no remote on {self.project.display_name}")
+
+        w = self.env['runbot_merge.pull_requests.backport'].create({
+            'pr_id': self.id,
+        })
+        return {
+            'type': 'ir.actions.act_window',
+            'name': f"Backport of {self.display_name}",
+            'views': [(False, 'form')],
+            'target': 'new',
+            'res_model': w._name,
+            'res_id': w.id,
+        }
+
+
+class PullRequestBackport(models.TransientModel):
+    _name = 'runbot_merge.pull_requests.backport'
+    _description = "PR backport wizard"
+    _rec_name = 'pr_id'
+
+    pr_id = fields.Many2one('runbot_merge.pull_requests', required=True)
+    project_id = fields.Many2one(related='pr_id.repository.project_id')
+    source_seq = fields.Integer(related='pr_id.target.sequence')
+    target = fields.Many2one(
+        'runbot_merge.branch',
+        domain="[('project_id', '=', project_id), ('sequence', '>', source_seq)]",
+    )
+
+    def action_apply(self) -> dict:
+        if not self.target:
+            raise UserError("A backport needs a backport target")
+
+        project = self.pr_id.project
+        branches = project._forward_port_ordered().ids
+        source = self.pr_id.source_id or self.pr_id
+        source_idx = branches.index(source.target.id)
+        if branches.index(self.target.id) >= source_idx:
+            raise UserError(
+                "The backport branch needs to be before the source's branch "
+                f"(got {self.target.name!r} and {source.target.name!r})"
+            )
+
+        _logger.info(
+            "backporting %s (on %s) to %s",
+            self.pr_id.display_name,
+            self.pr_id.target.name,
+            self.target.name,
+        )
+
+        bp_branch = "%s-%s-%s-bp" % (
+            self.target.name,
+            self.pr_id.refname,
+            secrets.token_urlsafe(3),
+        )
+        repo_id = self.pr_id.repository
+        repo = git.get_local(repo_id)
+
+        old_map = self.pr_id.commits_map
+        self.pr_id.commits_map = "{}"
+        conflict, head = self.pr_id._create_port_branch(repo, self.target, forward=False)
+        self.pr_id.commits_map = old_map
+
+        if conflict:
+            feedback = "\n".join(filter(None, conflict[1:3]))
+            raise UserError(f"backport conflict:\n\n{feedback}")
+        repo.push(git.fw_url(repo_id), f"{head}:refs/heads/{bp_branch}")
+
+        self.env.cr.execute('LOCK runbot_merge_pull_requests IN SHARE MODE')
+
+        owner, _repo = repo_id.fp_remote_target.split('/', 1)
+        message = source.message + f"\n\nBackport of {self.pr_id.display_name}"
+        title, body = re.fullmatch(r'(?P<title>[^\n]+)\n*(?P<body>.*)', message, flags=re.DOTALL).groups()
+
+        r = requests.post(
+            f'https://api.github.com/repos/{repo_id.name}/pulls',
+            headers={'Authorization': f'token {project.fp_github_token}'},
+            json={
+                'base': self.target.name,
+                'head': f'{owner}:{bp_branch}',
+                'title': '[Backport]' + ('' if title[0] == '[' else ' ') + title,
+                'body': body
+            }
+        )
+        if not r.ok:
+            raise UserError(f"Backport PR creation failure: {r.text}")
+
+        backport = self.env['runbot_merge.pull_requests']._from_gh(r.json())
+        _logger.info("Created backport %s for %s", backport.display_name, self.pr_id.display_name)
+
+        backport.write({
+            'merge_method': self.pr_id.merge_method,
+            # the backport's own forwardport should stop right before the
+            # original PR by default
+            'limit_id': branches[source_idx - 1],
+        })
+        self.env['runbot_merge.pull_requests.tagging'].create({
+            'repository': repo_id.id,
+            'pull_request': backport.number,
+            'tags_add': ['backport'],
+        })
+        # scheduling fp followup probably doesn't make sense since we don't copy the fw_policy...
+
+        return {
+            'type': 'ir.actions.act_window',
+            'name': "new backport",
+            'views': [(False, 'form')],
+            'res_model': backport._name,
+            'res_id': backport.id,
+        }
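The ordering guard in `action_apply` assumes `_forward_port_ordered()` lists branches from oldest to newest, so a valid backport target must sit strictly before the source PR's branch. A sketch with hypothetical branch names:

    branches = ['15.0', '16.0', '17.0']         # assumed old-to-new ordering
    source_idx = branches.index('17.0')         # the PR landed on 17.0
    assert branches.index('16.0') < source_idx  # 16.0 is an acceptable target
    # limit_id = branches[source_idx - 1]: the backport's own forward-ports
    # then stop at 16.0, right before the original PR's branch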
runbot_merge/models/backport/views.xml (new file, 23 lines)
@@ -0,0 +1,23 @@
+<odoo>
+    <record id="runbot_merge_backport_form" model="ir.ui.view">
+        <field name="name">Backport Wizard</field>
+        <field name="model">runbot_merge.pull_requests.backport</field>
+        <field name="arch" type="xml">
+            <form>
+                <field name="pr_id" invisible="1"/>
+                <field name="project_id" invisible="1"/>
+                <field name="source_seq" invisible="1"/>
+                <field name="target" class="w-100"/>
+            </form>
+        </field>
+    </record>
+
+    <record id="runbot_merge_backport_pr_action" model="ir.actions.server">
+        <field name="name">Perform backport from current PR</field>
+        <field name="type">ir.actions.server</field>
+        <field name="model_id" ref="model_runbot_merge_pull_requests"/>
+        <field name="binding_model_id" ref="model_runbot_merge_pull_requests"/>
+        <field name="state">code</field>
+        <field name="code">action = record.backport()</field>
+    </record>
+</odoo>
@@ -1,10 +1,8 @@
 from __future__ import annotations
 
-import base64
-import contextlib
 import logging
-import os
 import re
+import secrets
 from collections import defaultdict
 from collections.abc import Iterator
 
@@ -195,7 +193,7 @@ class Batch(models.Model):
 
     @api.depends(
         "merge_date",
-        "prs.error", "prs.draft", "prs.squash", "prs.merge_method",
+        "prs.error", "prs.draft",
         "skipchecks",
         "prs.status", "prs.reviewed_by", "prs.target",
     )
@@ -208,7 +206,7 @@ class Batch(models.Model):
             elif len(targets := batch.prs.mapped('target')) > 1:
                 batch.blocked = f"Multiple target branches: {', '.join(targets.mapped('name'))!r}"
             elif blocking := batch.prs.filtered(
-                lambda p: p.error or p.draft or not (p.squash or p.merge_method)
+                lambda p: p.error or p.draft
             ):
                 batch.blocked = "Pull request(s) %s blocked." % ', '.join(blocking.mapped('display_name'))
             elif not batch.skipchecks and (unready := batch.prs.filtered(
@@ -323,14 +321,12 @@ class Batch(models.Model):
         new_branch = '%s-%s-%s-fw' % (
             target.name,
             base.refname,
-            # avoid collisions between fp branches (labels can be reused
-            # or conflict especially as we're chopping off the owner)
-            base64.urlsafe_b64encode(os.urandom(3)).decode()
+            secrets.token_urlsafe(3),
         )
         conflicts = {}
         for pr in prs:
             repo = git.get_local(pr.repository)
-            conflicts[pr], head = pr._create_fp_branch(repo, target)
+            conflicts[pr], head = pr._create_port_branch(repo, target, forward=True)
 
             repo.push(git.fw_url(pr.repository), f"{head}:refs/heads/{new_branch}")
 
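The random suffix keeps regenerated forward-port branches from colliding when a label is reused; `secrets.token_urlsafe(3)` encodes 3 random bytes as 4 URL-safe base64 characters. A sketch (the branch-name pieces here are hypothetical):

    import secrets
    new_branch = '%s-%s-%s-fw' % ('17.0', 'patch-1', secrets.token_urlsafe(3))
    # e.g. '17.0-patch-1-pZ2w-fw'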
@@ -352,11 +348,11 @@ class Batch(models.Model):
             for p in root | source
         )
 
-        title, body = re.match(r'(?P<title>[^\n]+)\n*(?P<body>.*)', message, flags=re.DOTALL).groups()
+        title, body = re.fullmatch(r'(?P<title>[^\n]+)\n*(?P<body>.*)', message, flags=re.DOTALL).groups()
         r = gh.post(f'https://api.github.com/repos/{pr.repository.name}/pulls', json={
             'base': target.name,
             'head': f'{owner}:{new_branch}',
-            'title': '[FW]' + (' ' if title[0] != '[' else '') + title,
+            'title': '[FW]' + ('' if title[0] == '[' else ' ') + title,
             'body': body
         })
         if not r.ok:
@@ -432,7 +428,7 @@ class Batch(models.Model):
                 _logger.info('-> no parent %s (%s)', batch, prs)
                 continue
             if not force_fw and batch.source.fw_policy != 'skipci' \
-                    and (invalid := batch.prs.filtered(lambda p: p.state not in ['validated', 'ready'])):
+                    and (invalid := batch.prs.filtered(lambda p: p.status != 'success')):
                 _logger.info(
                     '-> wrong state %s (%s)',
                     batch,
@@ -72,6 +72,14 @@ class Approve:
             return f"r={ids}"
         return 'review+'
 
+    def __contains__(self, item):
+        if self.ids is None:
+            return True
+        return item in self.ids
+
+    def fmt(self):
+        return ", ".join(f"#{n:d}" for n in (self.ids or ()))
+
     @classmethod
     def help(cls, _: bool) -> Iterator[Tuple[str, str]]:
         yield "r(eview)+", "approves the PR, if it's a forwardport also approves all non-detached parents"
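Assuming `Approve` keeps the optional list of PR numbers from a command such as `r+ 1234,1235` in `self.ids` (None when no explicit list was given), the new helpers behave as in this sketch (the constructions shown are hypothetical):

    a = Approve(ids=[1234, 1235])
    assert 1234 in a                  # __contains__ checks the explicit list
    assert a.fmt() == "#1234, #1235"
    assert 9999 in Approve()          # no ids: the command applies to any PR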
@@ -184,7 +192,7 @@ class FW(enum.Enum):
     DEFAULT = enum.auto()
     NO = enum.auto()
     SKIPCI = enum.auto()
-    SKIPMERGE = enum.auto()
+    # SKIPMERGE = enum.auto()
 
     def __str__(self) -> str:
         return f'fw={self.name.lower()}'
@@ -192,8 +200,8 @@ class FW(enum.Enum):
     @classmethod
     def help(cls, is_reviewer: bool) -> Iterator[Tuple[str, str]]:
         yield str(cls.NO), "does not forward-port this PR"
+        yield str(cls.DEFAULT), "forward-ports this PR normally"
         if is_reviewer:
-            yield str(cls.DEFAULT), "forward-ports this PR normally"
             yield str(cls.SKIPCI), "does not wait for a forward-port's statuses to succeed before creating the next one"
 
 
runbot_merge/models/ir_ui_view.py (new file, 17 lines)
@@ -0,0 +1,17 @@
+from odoo import models
+
+
+class View(models.Model):
+    _inherit = 'ir.ui.view'
+
+    def _log_view_warning(self, msg, node):
+        """The view validator is dumb and triggers a warning because there's a
+        `field.btn`, even though making a `field[widget=url]` (which renders as
+        a link) look like a button is perfectly legitimate.
+
+        Suppress that warning.
+        """
+        if node.tag == 'field' and node.get('widget') == 'url' and "button/submit/reset" in msg:
+            return
+
+        super()._log_view_warning(msg, node)
runbot_merge/models/patcher.py (new file, 322 lines)
@@ -0,0 +1,322 @@
+""" Implements direct (unstaged) patching.
+
+Useful for massive data changes which are a pain to merge normally but very
+unlikely to break things (e.g. i18n), fixes so urgent staging is an unacceptable
+overhead, or FBI backdoors oh wait forget about that last one.
+"""
+from __future__ import annotations
+
+import logging
+import pathlib
+import re
+import subprocess
+import tarfile
+import tempfile
+from dataclasses import dataclass
+from email import message_from_string, policy
+from email.utils import parseaddr
+from typing import Union
+
+from odoo import models, fields, api
+from odoo.exceptions import ValidationError
+from odoo.tools.mail import plaintext2html
+
+from .pull_requests import Branch
+from .. import git
+
+_logger = logging.getLogger(__name__)
+FILE_PATTERN = re.compile(r"""
+# paths with spaces don't work well as the path can be followed by a timestamp
+# (in an unspecified format?)
+---\x20(?P<prefix_a>a/)?(?P<file_from>\S+)(:?\s.*)?\n
+\+\+\+\x20(?P<prefix_b>b/)?(?P<file_to>\S+)(:?\s.*)?\n
+@@\x20-(\d+(,\d+)?)\x20\+(\d+(,\d+)?)\x20@@ # trailing garbage
+""", re.VERBOSE)
+
+
+Authorship = Union[None, tuple[str, str], tuple[str, str, str]]
+@dataclass
+class ParseResult:
+    kind: str
+    author: Authorship
+    committer: Authorship
+    message: str
+    patch: str
+
+
+def expect(line: str, starts_with: str, message: str) -> str:
+    if not line.startswith(starts_with):
+        raise ValidationError(message)
+    return line
+
+
+def parse_show(p: Patch) -> ParseResult:
+    # headers are Author, Date or Author, AuthorDate, Commit, CommitDate
+    # commit message is indented 4 spaces
+    lines = iter(p.patch.splitlines(keepends=True))
+    if not next(lines).startswith("commit "):
+        raise ValidationError("Invalid patch")
+    name, email = parseaddr(
+        expect(next(lines), "Author:", "Missing author")
+        .split(maxsplit=1)[1])
+    date: str = next(lines)
+    header, date = date.split(maxsplit=1)
+    author = (name, email, date)
+    if header.startswith("Date:"):
+        committer = author
+    elif header.startswith("AuthorDate:"):
+        commit = expect(next(lines), "Commit:", "Missing committer")
+        commit_date = expect(next(lines), "CommitDate:", "Missing commit date")
+        name, email = parseaddr(commit.split(maxsplit=1)[1])
+        committer = (name, email, commit_date.split(maxsplit=1)[1])
+    else:
+        raise ValidationError(
+            "Invalid patch: expected 'Date:' or 'AuthorDate:' pseudo-header, "
+            f"found {header}.\nOnly 'medium' and 'fuller' formats are supported")
+
+    # skip possible extra headers before the message
+    while next(lines) != '\n':
+        continue
+
+    body = []
+    while (l := next(lines)) != '\n':
+        body.append(l.removeprefix('    '))
+
+    # remainder should be the patch
+    patch = "".join(
+        line for line in lines
+        if not line.startswith("git --diff ")
+        if not line.startswith("index ")
+    )
+    return ParseResult(kind="show", author=author, committer=committer, message="".join(body).rstrip(), patch=patch)
+
+
+def parse_format_patch(p: Patch) -> ParseResult:
+    m = message_from_string(p.patch, policy=policy.default)
+    if m.is_multipart():
+        raise ValidationError("multipart patches are not supported.")
+
+    name, email = parseaddr(m['from'])
+    author = (name, email, m['date'])
+    msg = re.sub(r'^\[PATCH( \d+/\d+)?\] ', '', m['subject'])
+    body, _, rest = m.get_payload().partition('---\n')
+    if body:
+        msg += '\n\n' + body
+
+    # split off the signature, per RFC 3676 § 4.3.
+    # leave the diffstat in as it *should* not confuse tooling?
+    patch, _, _ = rest.rpartition("-- \n")
+    # git (diff, show, format-patch) adds command and index headers to every
+    # file header, which patch(1) chokes on, strip them... but maybe this should
+    # extract the udiff sections instead?
+    patch = re.sub(
+        "^(git --diff .*|index .*)\n",
+        "",
+        patch,
+        flags=re.MULTILINE,
+    )
+    return ParseResult(kind="format-patch", author=author, committer=author, message=msg, patch=patch)
+
+
+class PatchFailure(Exception):
+    pass
+
+
+class Patch(models.Model):
+    _name = "runbot_merge.patch"
+    _inherit = ['mail.thread']
+    _description = "Unstaged direct-application patch"
+
+    active = fields.Boolean(default=True, tracking=True)
+    repository = fields.Many2one('runbot_merge.repository', required=True, tracking=True)
+    target = fields.Many2one('runbot_merge.branch', required=True, tracking=True)
+    commit = fields.Char(size=40, string="commit to cherry-pick, must be in-network", tracking=True)
+
+    patch = fields.Text(string="unified diff to apply", tracking=True)
+    format = fields.Selection([
+        ("format-patch", "format-patch"),
+        ("show", "show"),
+    ], compute="_compute_patch_meta")
+    message = fields.Text(compute="_compute_patch_meta")
+
+    _sql_constraints = [
+        ('patch_contents_either', 'check ((commit is null) != (patch is null))', 'Either the commit or patch must be set, and not both.'),
+    ]
+
+    @api.depends("patch")
+    def _compute_patch_meta(self) -> None:
+        for p in self:
+            if r := p._parse_patch():
+                p.format = r.kind
+                p.message = r.message
+            else:
+                p.format = False
+                p.message = False
+
+    def _parse_patch(self) -> ParseResult | None:
+        if not self.patch:
+            return None
+
+        if self.patch.startswith("commit "):
+            return parse_show(self)
+        elif self.patch.startswith("From "):
+            return parse_format_patch(self)
+        else:
+            raise ValidationError("Only `git show` and `git format-patch` formats are supported")
+
+    def _auto_init(self):
+        super()._auto_init()
+        self.env.cr.execute("""
+            CREATE INDEX IF NOT EXISTS runbot_merge_patch_active
+            ON runbot_merge_patch (target) WHERE active
+        """)
+
+    @api.model_create_multi
+    def create(self, vals_list):
+        if any(vals.get('active') is not False for vals in vals_list):
+            self.env.ref("runbot_merge.staging_cron")._trigger()
+        return super().create(vals_list)
+
+    def write(self, vals):
+        if vals.get("active") is not False:
+            self.env.ref("runbot_merge.staging_cron")._trigger()
+        return super().write(vals)
+
+    @api.constrains('patch')
+    def _validate_patch(self):
+        for p in self:
+            patch = p._parse_patch()
+            if not patch:
+                continue
+
+            has_files = False
+            for m in FILE_PATTERN.finditer(patch.patch):
+                has_files = True
+                if m['file_from'] != m['file_to']:
+                    raise ValidationError("Only patches updating a file in place are supported, not creation, removal, or renaming.")
+            if not has_files:
+                raise ValidationError("Patches should have files they patch, found none.")
+
+    def _apply_patches(self, target: Branch) -> bool:
+        patches = self.search([('target', '=', target.id)], order='id asc')
+        if not patches:
+            return True
+
+        commits = {}
+        repos = {}
+        for p in patches:
+            repos[p.repository] = git.get_local(p.repository).check(True)
+            commits.setdefault(p.repository, set())
+            if p.commit:
+                commits[p.repository].add(p.commit)
+
+        for patch in patches:
+            patch.active = False
+            r = repos[patch.repository]
+            remote = git.source_url(patch.repository)
+            if (cs := commits.pop(patch.repository, None)) is not None:
+                # first time encountering a repo, fetch the branch and commits
+                r.fetch(remote, f"+refs/heads/{target.name}:refs/heads/{target.name}", *cs, no_tags=True)
+
+            _logger.info(
+                "Applying %s to %r (in %s)",
+                patch,
+                patch.target.display_name,
+                patch.repository.name,
+            )
+            try:
+                if patch.commit:
+                    c = patch._apply_commit(r)
+                else:
+                    c = patch._apply_patch(r)
+            except Exception as e:
+                if isinstance(e, PatchFailure):
+                    subject = "Unable to apply patch"
+                else:
+                    subject = "Unknown error while trying to apply patch"
+                _logger.error("%s:\n%s", subject, str(e))
+                patch.message_post(body=plaintext2html(e), subject=subject)
+                continue
+            # `.` is the local "remote", so this updates target to c
+            r.fetch(".", f"{c}:{target.name}")
+
+            # push patch by patch, avoids sync issues and in most cases we have 0~1 patches
+            res = r.check(False).stdout()\
+                .with_config(encoding="utf-8")\
+                .push(remote, f"{target.name}:{target.name}")
+            ## one of the repos is out of consistency, loop around to new staging?
+            if res.returncode:
+                _logger.warning(
+                    "Unable to push result of %s\nout:\n%s\nerr:\n%s",
+                    patch.id,
+                    res.stdout,
+                    res.stderr,
+                )
+                return False
+
+        return True
+
+    def _apply_commit(self, r: git.Repo) -> str:
+        r = r.check(True).stdout().with_config(encoding="utf-8")
+        # TODO: maybe use git-rev-list instead?
+        sha = r.show('--no-patch', '--pretty=%H', self.target.name).stdout.strip()
+        target = r.show('--no-patch', '--pretty=%an%n%ae%n%ai%n%cn%n%ce%n%ci%n%B', self.commit)
+        # retrieve metadata of cherrypicked commit
+        author_name, author_email, author_date, committer_name, committer_email, committer_date, body =\
+            target.stdout.strip().split("\n", 6)
+
+        res = r.check(False).merge_tree(sha, self.commit)
+        if res.returncode:
+            _conflict_info, _, informational = res.stdout.partition('\n\n')
+            raise PatchFailure(informational)
+
+        return r.commit_tree(
+            tree=res.stdout.strip(),
+            parents=[sha],
+            message=body.strip(),
+            author=(author_name, author_email, author_date),
+            committer=(committer_name, committer_email, committer_date),
+        ).stdout.strip()
+
+    def _apply_patch(self, r: git.Repo) -> str:
+        p = self._parse_patch()
+        files = {}
+        def reader(_r, f):
+            return pathlib.Path(tmpdir, f).read_text(encoding="utf-8")
+
+        prefix = 0
+        for m in FILE_PATTERN.finditer(p.patch):
+            if not prefix and m['prefix_a'] and m['prefix_b']:
+                prefix = 1
+
+            files[m['file_to']] = reader
+
+        archiver = r.stdout(True)
+        # if the parent is checked then we can't get rid of the kwarg and popen doesn't support it
+        archiver._config.pop('check', None)
+        archiver.runner = subprocess.Popen
+        with archiver.archive(self.target.name, *files) as out, \
+            tarfile.open(fileobj=out.stdout, mode='r|') as tf,\
+            tempfile.TemporaryDirectory() as tmpdir:
+            tf.extractall(tmpdir)
+            patch = subprocess.run(
+                ['patch', f'-p{prefix}', '--directory', tmpdir, '--verbose'],
+                input=p.patch,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                encoding='utf-8',
+            )
+            if patch.returncode:
+                raise PatchFailure("\n---------\n".join(filter(None, [p.patch, patch.stdout.strip(), patch.stderr.strip()])))
+            new_tree = r.update_tree(self.target.name, files)
+
+        sha = r.stdout().with_config(encoding='utf-8')\
+            .show('--no-patch', '--pretty=%H', self.target.name)\
+            .stdout.strip()
+        return r.commit_tree(
+            tree=new_tree,
+            parents=[sha],
+            message=p.message,
+            author=p.author,
+            committer=p.committer,
+        ).stdout.strip()
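For reference, a sketch of the minimal file header `FILE_PATTERN` accepts (the path is hypothetical; the pattern wants matching `---`/`+++` lines followed by a hunk header, and `_validate_patch` additionally insists that `file_from` equal `file_to`):

    m = FILE_PATTERN.search(
        "--- a/i18n/fr.po\n"
        "+++ b/i18n/fr.po\n"
        "@@ -1,3 +1,3 @@ trailing garbage\n"
    )
    assert m and m['file_from'] == m['file_to'] == 'i18n/fr.po'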
@@ -147,6 +147,18 @@ class Project(models.Model):
             ('staging_enabled', '=', True),
             ('project_id.staging_enabled', '=', True),
         ]):
+            try:
+                with self.env.cr.savepoint():
+                    if not self.env['runbot_merge.patch']._apply_patches(branch):
+                        self.env.ref("runbot_merge.staging_cron")._trigger()
+                        return
+
+            except Exception:
+                _logger.exception("Failed to apply patches to branch %r", branch.name)
+            else:
+                if commit:
+                    self.env.cr.commit()
+
             try:
                 with self.env.cr.savepoint(), \
                     sentry_sdk.start_span(description=f'create staging {branch.name}') as span:
@@ -217,7 +217,7 @@ class FreezeWizard(models.Model):
             for r in self.project_id.repo_ids
         }
         for repo, copy in repos.items():
-            copy.fetch(git.source_url(repo), '+refs/heads/*:refs/heads/*')
+            copy.fetch(git.source_url(repo), '+refs/heads/*:refs/heads/*', no_tags=True)
         all_prs = self.release_pr_ids.pr_id | self.bump_pr_ids.pr_id
         for pr in all_prs:
             repos[pr.repository].fetch(
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import ast
+import builtins
 import collections
 import contextlib
 import datetime
@@ -9,6 +10,7 @@ import json
 import logging
 import re
 import time
+from enum import IntEnum
 from functools import reduce
 from operator import itemgetter
 from typing import Optional, Union, List, Iterator, Tuple
@@ -21,7 +23,7 @@ from markupsafe import Markup
 from odoo import api, fields, models, tools, Command
 from odoo.exceptions import AccessError, UserError
 from odoo.osv import expression
-from odoo.tools import html_escape, Reverse
+from odoo.tools import html_escape, Reverse, mute_logger
 from . import commands
 from .utils import enum, readonly, dfm
 
@@ -101,12 +103,19 @@ All substitutions are tentatively applied sequentially to the input.
         return github.GH(self.project_id[token_field], self.name)
 
     def _auto_init(self):
-        res = super(Repository, self)._auto_init()
+        res = super()._auto_init()
         tools.create_unique_index(
             self._cr, 'runbot_merge_unique_repo', self._table, ['name'])
         return res
 
-    def _load_pr(self, number, *, closing=False):
+    def _load_pr(
+        self,
+        number: int,
+        *,
+        closing: bool = False,
+        squash: bool = False,
+        ping: str | None = None,
+    ):
         gh = self.github()
 
         # fetch PR object and handle as *opened*
@@ -133,6 +142,10 @@ All substitutions are tentatively applied sequentially to the input.
             ('number', '=', number),
         ])
         if pr_id:
+            if squash:
+                pr_id.squash = pr['commits'] == 1
+                return
+
             sync = controllers.handle_pr(self.env, {
                 'action': 'synchronize',
                 'pull_request': pr,
@@ -233,7 +246,10 @@ All substitutions are tentatively applied sequentially to the input.
         self.env.ref('runbot_merge.pr.load.fetched')._send(
             repository=self,
             pull_request=number,
-            format_args={'pr': pr_id},
+            format_args={
+                'pr': pr_id,
+                'ping': ping or pr_id.ping,
+            },
         )
 
     def having_branch(self, branch):
@@ -279,7 +295,7 @@ class Branch(models.Model):
     staging_enabled = fields.Boolean(default=True)
 
     def _auto_init(self):
-        res = super(Branch, self)._auto_init()
+        res = super()._auto_init()
         tools.create_unique_index(
             self._cr, 'runbot_merge_unique_branch_per_repo',
             self._table, ['name', 'project_id'])
@@ -303,6 +319,13 @@ class Branch(models.Model):
                 'message': tmpl._format(pr=pr),
             } for pr in actives.prs])
             self.env.ref('runbot_merge.branch_cleanup')._trigger()
+
+        if (
+            (vals.get('staging_enabled') is True and not all(self.mapped('staging_enabled')))
+            or (vals.get('active') is True and not all(self.mapped('active')))
+        ):
+            self.env.ref('runbot_merge.staging_cron')._trigger()
+
         super().write(vals)
         return True
 
@@ -419,10 +442,16 @@ class PullRequests(models.Model):
     staging_id = fields.Many2one('runbot_merge.stagings', compute='_compute_staging', inverse=readonly, readonly=True, store=True)
     staging_ids = fields.Many2many('runbot_merge.stagings', string="Stagings", compute='_compute_stagings', inverse=readonly, readonly=True, context={"active_test": False})
 
-    @api.depends('batch_id.batch_staging_ids.runbot_merge_stagings_id.active')
+    @api.depends(
+        'closed',
+        'batch_id.batch_staging_ids.runbot_merge_stagings_id.active',
+    )
     def _compute_staging(self):
         for p in self:
-            p.staging_id = p.batch_id.staging_ids.filtered('active')
+            if p.closed:
+                p.staging_id = False
+            else:
+                p.staging_id = p.batch_id.staging_ids.filtered('active')
 
     @api.depends('batch_id.batch_staging_ids.runbot_merge_stagings_id')
     def _compute_stagings(self):
@@ -576,8 +605,6 @@ class PullRequests(models.Model):
 
     @api.depends(
         'batch_id.prs.draft',
-        'batch_id.prs.squash',
-        'batch_id.prs.merge_method',
         'batch_id.prs.state',
         'batch_id.skipchecks',
     )
@@ -585,12 +612,11 @@ class PullRequests(models.Model):
         self.blocked = False
         requirements = (
             lambda p: not p.draft,
-            lambda p: p.squash or p.merge_method,
             lambda p: p.state == 'ready' \
                 or p.batch_id.skipchecks \
                 and all(pr.state != 'error' for pr in p.batch_id.prs)
         )
-        messages = ('is in draft', 'has no merge method', 'is not ready')
+        messages = ('is in draft', 'is not ready')
         for pr in self:
             if pr.state in ('merged', 'closed'):
                 continue
@@ -613,26 +639,54 @@ class PullRequests(models.Model):
             return json.loads(self.overrides)
         return {}
 
-    def _get_or_schedule(self, repo_name, number, *, target=None, closing=False):
+    def _get_or_schedule(
+        self,
+        repo_name: str,
+        number: int,
+        *,
+        target: str | None = None,
+        closing: bool = False,
+        commenter: str | None = None,
+    ) -> PullRequests | None:
         repo = self.env['runbot_merge.repository'].search([('name', '=', repo_name)])
         if not repo:
+            source = self.env['runbot_merge.events_sources'].search([('repository', '=', repo_name)])
+            _logger.warning(
+                "Got a PR notification for unknown repository %s (source %s)",
+                repo_name, source,
+            )
             return
 
-        if target and not repo.project_id._has_branch(target):
-            self.env.ref('runbot_merge.pr.fetch.unmanaged')._send(
-                repository=repo,
-                pull_request=number,
-                format_args={'repository': repo, 'branch': target, 'number': number}
-            )
-            return
-
-        pr = self.search([
-            ('repository', '=', repo.id),
-            ('number', '=', number,)
-        ])
+        if target:
+            b = self.env['runbot_merge.branch'].with_context(active_test=False).search([
+                ('project_id', '=', repo.project_id.id),
+                ('name', '=', target),
+            ])
+            tmpl = None if b.active \
+                else 'runbot_merge.handle.branch.inactive' if b\
+                else 'runbot_merge.pr.fetch.unmanaged'
+        else:
+            tmpl = None
+
+        pr = self.search([('repository', '=', repo.id), ('number', '=', number)])
+        if pr and not pr.target.active:
+            tmpl = 'runbot_merge.handle.branch.inactive'
+            target = pr.target.name
+
+        if tmpl and not closing:
+            self.env.ref(tmpl)._send(
+                repository=repo,
+                pull_request=number,
+                format_args={'repository': repo_name, 'branch': target, 'number': number},
+            )
+
         if pr:
             return pr
 
+        # if the branch is unknown or inactive, no need to fetch the PR
+        if tmpl:
+            return
+
         Fetch = self.env['runbot_merge.fetch_job']
         if Fetch.search([('repository', '=', repo.id), ('number', '=', number)]):
             return
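The template selection above can be summarised by this small sketch (`pick_template` is a hypothetical name mirroring the conditional expression in `_get_or_schedule`):

    def pick_template(branch_found: bool, branch_active: bool) -> str | None:
        if branch_active:
            return None  # known active branch: no notification needed
        if branch_found:
            return 'runbot_merge.handle.branch.inactive'
        return 'runbot_merge.pr.fetch.unmanaged'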
@@ -640,6 +694,7 @@ class PullRequests(models.Model):
             'repository': repo.id,
             'number': number,
             'closing': closing,
+            'commenter': commenter,
         })
 
     def _iter_ancestors(self) -> Iterator[PullRequests]:
@@ -673,14 +728,14 @@ class PullRequests(models.Model):
         })
 
         is_admin, is_reviewer, is_author = self._pr_acl(author)
-        _source_admin, source_reviewer, source_author = self.source_id._pr_acl(author)
+        source_author = self.source_id._pr_acl(author).is_author
         # nota: 15.0 `has_group` completely doesn't work if the recordset is empty
         super_admin = is_admin and author.user_ids and author.user_ids.has_group('runbot_merge.group_admin')
 
         help_list: list[type(commands.Command)] = list(filter(None, [
             commands.Help,
 
-            (self.source_id and (source_author or source_reviewer) or is_reviewer) and not self.reviewed_by and commands.Approve,
+            (is_reviewer or (self.source_id and source_author)) and not self.reviewed_by and commands.Approve,
             (is_author or source_author) and self.reviewed_by and commands.Reject,
             (is_author or source_author) and self.error and commands.Retry,
 
@@ -762,55 +817,21 @@ For your own safety I've ignored *everything in your entire comment*.
             match command:
                 case commands.Approve() if self.draft:
                     msg = "draft PRs can not be approved."
-                case commands.Approve() if self.source_id:
-                    # rules are a touch different for forwardport PRs:
-                    valid = lambda _: True if command.ids is None else lambda n: n in command.ids
-                    _, source_reviewer, source_author = self.source_id._pr_acl(author)
-
-                    ancestors = list(self._iter_ancestors())
-                    # - reviewers on the original can approve any forward port
-                    if source_reviewer:
-                        approveable = ancestors
-                    elif source_author:
-                        # give full review rights on all forwardports (attached
-                        # or not) to original author
-                        approveable = ancestors
-                    else:
-                        # between the first merged ancestor and self
-                        mergeors = list(itertools.dropwhile(
-                            lambda p: p.state != 'merged',
-                            reversed(ancestors),
-                        ))
-                        # between the first ancestor the current user can review and self
-                        reviewors = list(itertools.dropwhile(
-                            lambda p: not p._pr_acl(author).is_reviewer,
-                            reversed(ancestors),
-                        ))
-
-                        # source author can approve any descendant of a merged
-                        # forward port (or source), people with review rights
-                        # to a forward port have review rights to its
-                        # descendants, if both apply use the most favorable
-                        # (largest number of PRs)
-                        if source_author and len(mergeors) > len(reviewors):
-                            approveable = mergeors
-                        else:
-                            approveable = reviewors
-
-                    if approveable:
-                        for pr in approveable:
-                            if not (pr.state in RPLUS and valid(pr.number)):
-                                continue
-                            msg = pr._approve(author, login)
-                            if msg:
-                                break
-                    else:
-                        msg = f"you can't {command} you silly little bean."
+                case commands.Approve() if self.source_id and (source_author or is_reviewer):
+                    if selected := [p for p in self._iter_ancestors() if p.number in command]:
+                        for pr in selected:
+                            # ignore already reviewed PRs, unless it's the one
+                            # being r+'d, this means ancestors in error will not
+                            # be warned about
+                            if pr == self or not pr.reviewed_by:
+                                pr._approve(author, login)
+                    else:
+                        msg = f"tried to approve PRs {command.fmt()} but no such PR is an ancestors of {self.number}"
                 case commands.Approve() if is_reviewer:
-                    if command.ids is not None and command.ids != [self.number]:
-                        msg = f"tried to approve PRs {command.ids} but the current PR is {self.number}"
-                    else:
+                    if command.ids is None or command.ids == [self.number]:
                         msg = self._approve(author, login)
+                    else:
+                        msg = f"tried to approve PRs {command.fmt()} but the current PR is {self.number}"
                 case commands.Reject() if is_author or source_author:
                     if self.batch_id.skipchecks or self.reviewed_by:
                         if self.error:
@@ -824,10 +845,12 @@ For your own safety I've ignored *everything in your entire comment*.
                             pull_request=self.number,
                             format_args={'user': login, 'pr': self},
                         )
-                        if self.source_id:
+                        if self.source_id.forwardport_ids.filtered(
+                            lambda p: p.reviewed_by and p.state not in ('merged', 'closed')
+                        ):
                             feedback("Note that only this forward-port has been"
-                                     " unapproved, sibling forward ports may "
-                                     "have to be unapproved individually.")
+                                     " unapproved, sibling forward ports may"
+                                     " have to be unapproved individually.")
                         self.unstage("unreviewed (r-) by %s", login)
                     else:
                         msg = "r- makes no sense in the current PR state."
@@ -839,6 +862,10 @@ For your own safety I've ignored *everything in your entire comment*.
                         pull_request=self.number,
                         format_args={'new_method': explanation, 'pr': self, 'user': login},
                     )
+                    # if the merge method is the only thing preventing (but not
+                    # *blocking*) staging, trigger a staging
+                    if self.state == 'ready':
+                        self.env.ref("runbot_merge.staging_cron")._trigger()
                 case commands.Retry() if is_author or source_author:
                     if self.error:
                         self.error = False
@@ -905,21 +932,38 @@ For your own safety I've ignored *everything in your entire comment*.
                 case commands.Close() if source_author:
                     feedback(close=True)
                 case commands.FW():
+                    message = None
                     match command:
                         case commands.FW.NO if is_author or source_author:
                             message = "Disabled forward-porting."
                         case commands.FW.DEFAULT if is_author or source_author:
                             message = "Waiting for CI to create followup forward-ports."
-                        case commands.FW.SKIPCI if is_reviewer or source_reviewer:
+                        case commands.FW.SKIPCI if is_reviewer:
                             message = "Not waiting for CI to create followup forward-ports."
-                        case commands.FW.SKIPMERGE if is_reviewer or source_reviewer:
-                            message = "Not waiting for merge to create followup forward-ports."
+                        # case commands.FW.SKIPMERGE if is_reviewer:
+                        #     message = "Not waiting for merge to create followup forward-ports."
                         case _:
                             msg = f"you don't have the right to {command}."
+
+                    if message:
+                        # TODO: feedback?
+                        if self.source_id:
+                            "if the pr is not a source, ignore (maybe?)"
+                        elif not self.merge_date:
+                            "if the PR is not merged, it'll be fw'd normally"
+                        elif self.batch_id.fw_policy != 'no' or command == commands.FW.NO:
+                            "if the policy is not being flipped from no to something else, nothing to do"
+                        elif branch_key(self.limit_id) <= branch_key(self.target):
+                            "if the limit is lower than current (old style ignore) there's nothing to do"
+                        else:
+                            message = f"Starting forward-port. {message}"
+                            self.env['forwardport.batches'].create({
+                                'batch_id': self.batch_id.id,
+                                'source': 'merge',
+                            })
 
-                    if not msg:
                         (self.source_id or self).batch_id.fw_policy = command.name.lower()
                         feedback(message=message)
 
                 case commands.Limit(branch) if is_author:
                     if branch is None:
                         feedback(message="'ignore' is deprecated, use 'fw=no' to disable forward porting.")
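A note on the guard chain added above: in Python, a bare string literal is a valid statement that evaluates and does nothing, so the new `if`/`elif` ladder uses strings as self-documenting no-op branches instead of `pass` plus a comment. A minimal sketch of the pattern, with illustrative names (only the string-as-no-op idiom is taken from the diff):

    def maybe_start_forward_port(pr, command):
        # each branch without real work "executes" a string that documents why
        if pr.source_id:
            "not a source PR: the forward-port chain is driven from the source"
        elif not pr.merge_date:
            "not merged yet: it will be forward-ported through the normal flow"
        else:
            start_forward_port(pr)  # hypothetical helper for the real work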
@@ -927,7 +971,7 @@ For your own safety I've ignored *everything in your entire comment*.
                     for p in self.batch_id.prs:
                         ping, m = p._maybe_update_limit(limit)
 
-                        if ping and p == self:
+                        if ping is Ping.ERROR and p == self:
                             msg = m
                         else:
                             if ping:
@@ -960,31 +1004,37 @@ For your own safety I've ignored *everything in your entire comment*.
         feedback(message=f"@{login}{rejections}{footer}")
         return 'rejected'
 
-    def _maybe_update_limit(self, limit: str) -> Tuple[bool, str]:
+    def _maybe_update_limit(self, limit: str) -> Tuple[Ping, str]:
         limit_id = self.env['runbot_merge.branch'].with_context(active_test=False).search([
             ('project_id', '=', self.repository.project_id.id),
             ('name', '=', limit),
         ])
         if not limit_id:
-            return True, f"there is no branch {limit!r}, it can't be used as a forward port target."
+            return Ping.ERROR, f"there is no branch {limit!r}, it can't be used as a forward port target."
 
         if limit_id != self.target and not limit_id.active:
-            return True, f"branch {limit_id.name!r} is disabled, it can't be used as a forward port target."
+            return Ping.ERROR, f"branch {limit_id.name!r} is disabled, it can't be used as a forward port target."
 
         # not forward ported yet, just acknowledge the request
         if not self.source_id and self.state != 'merged':
             self.limit_id = limit_id
             if branch_key(limit_id) <= branch_key(self.target):
-                return False, "Forward-port disabled (via limit)."
+                return Ping.NO, "Forward-port disabled (via limit)."
             else:
-                return False, f"Forward-porting to {limit_id.name!r}."
+                suffix = ''
+                if self.batch_id.fw_policy == 'no':
+                    self.batch_id.fw_policy = 'default'
+                    suffix = " Re-enabled forward-porting (you should use "\
+                             "`fw=default` to re-enable forward porting "\
+                             "after disabling)."
+                return Ping.NO, f"Forward-porting to {limit_id.name!r}.{suffix}"
 
         # if the PR has been forwardported
         prs = (self | self.forwardport_ids | self.source_id | self.source_id.forwardport_ids)
         tip = max(prs, key=pr_key)
         # if the fp tip was closed it's fine
         if tip.state == 'closed':
-            return True, f"{tip.display_name} is closed, no forward porting is going on"
+            return Ping.ERROR, f"{tip.display_name} is closed, no forward porting is going on"
 
         prs.limit_id = limit_id
 
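To make the new return type concrete, here is a hedged sketch of how a `(Ping, str)` pair from `_maybe_update_limit` can be consumed; `pr`, `login` and `feedback` are assumed surroundings, only the tuple shape and the `Ping` members (added later in this diff) come from the change itself:

    ping, message = pr._maybe_update_limit('saas-17.2')
    if ping is Ping.ERROR:
        msg = message                            # surfaced as the command's error
    elif ping:                                   # Ping.YES: explicitly ping the user
        feedback(message=f"@{login} {message}")
    else:                                        # Ping.NO: plain informational feedback
        feedback(message=message)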
@@ -1003,7 +1053,7 @@ For your own safety I've ignored *everything in your entire comment*.
         except psycopg2.errors.LockNotAvailable:
             # row locked = port occurring and probably going to succeed,
             # so next(real_limit) likely a done deal already
-            return True, (
+            return Ping.ERROR, (
                 f"Forward port of {tip.display_name} likely already "
                 f"ongoing, unable to cancel, close next forward port "
                 f"when it completes.")
@@ -1014,6 +1064,9 @@ For your own safety I've ignored *everything in your entire comment*.
         # forward porting was previously stopped at tip, and we want it to
         # resume
         if tip.state == 'merged':
+            if tip.batch_id.source.fw_policy == 'no':
+                # hack to ping the user but not rollback the transaction
+                return Ping.YES, f"can not forward-port, policy is 'no' on {(tip.source_id or tip).display_name}"
             self.env['forwardport.batches'].create({
                 'batch_id': tip.batch_id.id,
                 'source': 'fp' if tip.parent_id else 'merge',
@@ -1048,7 +1101,7 @@ For your own safety I've ignored *everything in your entire comment*.
             for p in (self.source_id | root) - self
         ])
 
-        return False, msg
+        return Ping.NO, msg
 
 
     def _find_next_target(self) -> Optional[Branch]:
@@ -1115,7 +1168,7 @@ For your own safety I've ignored *everything in your entire comment*.
             self.env.ref('runbot_merge.check_linked_prs_status')._trigger()
         return None
 
-    def _pr_acl(self, user):
+    def _pr_acl(self, user) -> ACL:
         if not self:
             return ACL(False, False, False)
 
@@ -1124,17 +1177,25 @@ For your own safety I've ignored *everything in your entire comment*.
             ('repository_id', '=', self.repository.id),
             ('review', '=', True) if self.author != user else ('self_review', '=', True),
         ]) == 1
-        is_reviewer = is_admin or self in user.delegate_reviewer
-        # TODO: should delegate reviewers be able to retry PRs?
-        is_author = is_reviewer or self.author == user
-        return ACL(is_admin, is_reviewer, is_author)
+        if is_admin:
+            return ACL(True, True, True)
+        # delegate on source = delegate on PR
+        if self.source_id and self.source_id in user.delegate_reviewer:
+            return ACL(False, True, True)
+        # delegate on any ancestors ~ delegate on PR (maybe should be any descendant of source?)
+        if any(p in user.delegate_reviewer for p in self._iter_ancestors()):
+            return ACL(False, True, True)
+
+        # user is probably always False on a forward port
+        return ACL(False, False, self.author == user)
 
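For reference, `ACL` is consumed as a triple of booleans. A plausible shape is sketched below; the real definition lives elsewhere in the module, so the field names are inferred from the `ACL(is_admin, is_reviewer, is_author)` call sites and are an assumption. Note the invariant the rewrite preserves: every tuple it returns has admin implying reviewer implying author.

    from typing import NamedTuple

    class ACL(NamedTuple):
        is_admin: bool     # full rights, from the review/self_review partner flags
        is_reviewer: bool  # can review; granted to delegates on the PR, its source, or an ancestor
        is_author: bool    # author-level commands only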
     def _validate(self, statuses):
         # could have two PRs (e.g. one open and one closed) at least
         # temporarily on the same head, or on the same head with different
         # targets
         updateable = self.filtered(lambda p: not p.merge_date)
-        updateable.statuses = statuses
+        updateable.statuses = statuses or '{}'
         for pr in updateable:
             if pr.status == "failure":
                 statuses = json.loads(pr.statuses_full)
@@ -1160,7 +1221,7 @@ For your own safety I've ignored *everything in your entire comment*.
         super().modified(fnames, create, before)
 
     @api.depends(
-        'statuses', 'overrides', 'target', 'parent_id',
+        'statuses', 'overrides', 'target', 'parent_id', 'skipchecks',
         'repository.status_ids.context',
         'repository.status_ids.branch_filter',
         'repository.status_ids.prs',
@@ -1170,6 +1231,9 @@ For your own safety I've ignored *everything in your entire comment*.
             statuses = {**json.loads(pr.statuses), **pr._get_overrides()}
 
             pr.statuses_full = json.dumps(statuses, indent=4)
+            if pr.skipchecks:
+                pr.status = 'success'
+                continue
 
             st = 'success'
             for ci in pr.repository.status_ids._for_pr(pr):
@@ -1179,6 +1243,9 @@ For your own safety I've ignored *everything in your entire comment*.
                     break
                 if v == 'pending':
                     st = 'pending'
+            if pr.status != 'failure' and st == 'failure':
+                pr.unstage("had CI failure after staging")
+
             pr.status = st
 
     @api.depends(
@@ -1188,10 +1255,10 @@ For your own safety I've ignored *everything in your entire comment*.
     )
     def _compute_state(self):
         for pr in self:
-            if pr.batch_id.merge_date:
-                pr.state = 'merged'
-            elif pr.closed:
+            if pr.closed:
                 pr.state = "closed"
+            elif pr.batch_id.merge_date:
+                pr.state = 'merged'
             elif pr.error:
                 pr.state = "error"
             elif pr.batch_id.skipchecks: # skipchecks behaves as both approval and status override
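The reorder above changes which state wins when a PR is closed but belongs to a batch that has a merge date: closure is now checked first. A small illustrative sketch of the resulting precedence (the function is a stand-in for the compute method, the scenario table is an assumption about intent):

    # Before: PR closed + batch merged -> 'merged' (batch checked first)
    # After:  PR closed + batch merged -> 'closed' (closure checked first)
    def compute_state(closed: bool, batch_merged: bool, error: bool) -> str:
        if closed:
            return "closed"
        if batch_merged:
            return "merged"
        if error:
            return "error"
        return "open"  # placeholder for the remaining branches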
@@ -1276,12 +1343,6 @@ For your own safety I've ignored *everything in your entire comment*.
                          "ON runbot_merge_pull_requests "
                          "USING hash (head)")
-
-    @property
-    def _tagstate(self):
-        if self.state == 'ready' and self.staging_id.heads:
-            return 'staged'
-        return self.state
 
     def _get_batch(self, *, target, label):
         batch = self.env['runbot_merge.batch']
         if not re.search(r':patch-\d+$', label):
@@ -1294,9 +1355,14 @@ For your own safety I've ignored *everything in your entire comment*.
 
     @api.model_create_multi
     def create(self, vals_list):
+        batches = {}
         for vals in vals_list:
-            batch = self._get_batch(target=vals['target'], label=vals['label'])
+            batch_key = vals['target'], vals['label']
+            batch = batches.get(batch_key)
+            if batch is None:
+                batch = batches[batch_key] = self._get_batch(target=vals['target'], label=vals['label'])
             vals['batch_id'] = batch.id
 
             if 'limit_id' not in vals:
                 limits = {p.limit_id for p in batch.prs}
                 if len(limits) == 1:
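The change above memoizes `_get_batch` per `(target, label)` key, so several PRs created in one call share a single batch instead of each looking one up (or creating one). A hedged sketch of the same pattern in isolation; `compute` and the item shape are illustrative:

    def create_all(items):
        cache = {}
        for item in items:
            key = (item["target"], item["label"])
            value = cache.get(key)
            if value is None:
                # expensive lookup-or-create, performed once per distinct key
                value = cache[key] = compute(key)
            item["value"] = value

A plain `dict.setdefault` would not work here, since the expensive call must only run on a cache miss.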
@@ -1315,7 +1381,7 @@ For your own safety I've ignored *everything in your entire comment*.
         prs = super().create(vals_list)
         for pr in prs:
             c = self.env['runbot_merge.commit'].search([('sha', '=', pr.head)])
-            pr._validate(c.statuses or '{}')
+            pr._validate(c.statuses)
 
             if pr.state not in ('closed', 'merged'):
                 self.env.ref('runbot_merge.pr.created')._send(
@@ -1392,8 +1458,16 @@ For your own safety I've ignored *everything in your entire comment*.
 
         newhead = vals.get('head')
         if newhead:
+            authors = self.env.cr.precommit.data.get(f'mail.tracking.author.{self._name}', {})
+            for p in self:
+                if pid := authors.get(p.id):
+                    writer = self.env['res.partner'].browse(pid)
+                else:
+                    writer = self.env.user.partner_id
+                p.unstage("updated by %s", writer.github_login or writer.name)
+            # this can be batched
             c = self.env['runbot_merge.commit'].search([('sha', '=', newhead)])
-            self._validate(c.statuses or '{}')
+            self._validate(c.statuses)
         return w
 
     def _check_linked_prs_statuses(self, commit=False):
@@ -1628,6 +1702,13 @@ For your own safety I've ignored *everything in your entire comment*.
             'batch_id': batch.create({}).id,
         })
 
+
+class Ping(IntEnum):
+    NO = 0
+    YES = 1
+    ERROR = 2
+
+
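Because `Ping` is an `IntEnum` with `NO = 0`, its members keep the truthiness of the booleans they replace: pre-existing `if ping:` checks continue to work, while callers that care can use `ping is Ping.ERROR`. A self-contained check of that property:

    from enum import IntEnum

    class Ping(IntEnum):  # as added in this diff
        NO = 0
        YES = 1
        ERROR = 2

    assert not Ping.NO               # falsy, like the old `False` return
    assert Ping.YES and Ping.ERROR   # truthy, like the old `True`
    assert Ping.ERROR is Ping(2)     # members are singletons, so `is` checks are safe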
 # ordering is a bit unintuitive because the lowest sequence (and name)
 # is the last link of the fp chain, reasoning is a bit more natural the
 # other way around (highest object is the last), especially with Python
@@ -1689,6 +1770,8 @@ class Tagging(models.Model):
                 values['tags_remove'] = json.dumps(list(values['tags_remove']))
             if not isinstance(values.get('tags_add', ''), str):
                 values['tags_add'] = json.dumps(list(values['tags_add']))
+        if any(vals_list):
+            self.env.ref('runbot_merge.labels_cron')._trigger()
         return super().create(vals_list)
 
     def _send(self):
@@ -1894,23 +1977,25 @@ class Commit(models.Model):
     pull_requests = fields.One2many('runbot_merge.pull_requests', compute='_compute_prs')
 
     @api.model_create_multi
-    def create(self, values):
-        for vals in values:
-            vals['to_check'] = True
-        r = super(Commit, self).create(values)
+    def create(self, vals_list):
+        for values in vals_list:
+            values['to_check'] = True
+        r = super().create(vals_list)
         self.env.ref("runbot_merge.process_updated_commits")._trigger()
         return r
 
     def write(self, values):
         values.setdefault('to_check', True)
-        r = super(Commit, self).write(values)
+        r = super().write(values)
         if values['to_check']:
             self.env.ref("runbot_merge.process_updated_commits")._trigger()
         return r
 
+    @mute_logger('odoo.sql_db')
     def _notify(self):
         Stagings = self.env['runbot_merge.stagings']
         PRs = self.env['runbot_merge.pull_requests']
+        serialization_failures = False
         # chances are low that we'll have more than one commit
         for c in self.search([('to_check', '=', True)]):
             sha = c.sha
@@ -1930,20 +2015,23 @@ class Commit(models.Model):
                 if stagings:
                     stagings._notify(c)
             except psycopg2.errors.SerializationFailure:
-                _logger.info("Failed to apply commit %s (%s): serialization failure", c, sha)
+                serialization_failures = True
+                _logger.info("Failed to apply commit %s: serialization failure", sha)
                 self.env.cr.rollback()
             except Exception:
-                _logger.exception("Failed to apply commit %s (%s)", c, sha)
+                _logger.exception("Failed to apply commit %s", sha)
                 self.env.cr.rollback()
             else:
                 self.env.cr.commit()
+        if serialization_failures:
+            self.env.ref("runbot_merge.process_updated_commits")._trigger()
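The loop above commits each commit record independently, rolls back on conflict, and re-triggers the cron once at the end so rows lost to a serialization failure are retried on the next run rather than dropped. A hedged generic sketch of the shape; `process` and `trigger_retry` are placeholders, `psycopg2` is the real dependency:

    import psycopg2.errors

    def drain(cr, items, process, trigger_retry):
        retry = False
        for item in items:
            try:
                process(item)
            except psycopg2.errors.SerializationFailure:
                retry = True      # a concurrent writer won; leave the item queued
                cr.rollback()
            except Exception:
                cr.rollback()     # unexpected failure: log elsewhere, don't wedge the queue
            else:
                cr.commit()       # persist each success independently
        if retry:
            trigger_retry()       # one wakeup re-processes whatever is left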
 
     _sql_constraints = [
         ('unique_sha', 'unique (sha)', 'no duplicated commit'),
     ]
 
     def _auto_init(self):
-        res = super(Commit, self)._auto_init()
+        res = super()._auto_init()
         self._cr.execute("""
             CREATE INDEX IF NOT EXISTS runbot_merge_unique_statuses
             ON runbot_merge_commit
@@ -1985,7 +2073,7 @@ class Stagings(models.Model):
     active = fields.Boolean(default=True)
 
     staged_at = fields.Datetime(default=fields.Datetime.now, index=True)
-    staging_end = fields.Datetime(store=True, compute='_compute_state')
+    staging_end = fields.Datetime(store=True)
     staging_duration = fields.Float(compute='_compute_duration')
     timeout_limit = fields.Datetime(store=True, compute='_compute_timeout_limit')
     reason = fields.Text("Reason for final state (if any)")
@@ -2050,6 +2138,7 @@ class Stagings(models.Model):
                 ._trigger(fields.Datetime.to_datetime(timeout))
 
         if vals.get('active') is False:
+            vals['staging_end'] = fields.Datetime.now()
             self.env.ref("runbot_merge.staging_cron")._trigger()
 
         return super().write(vals)
@@ -2088,6 +2177,7 @@ class Stagings(models.Model):
         if not self.env.user.has_group('runbot_merge.status'):
             raise AccessError("You are not allowed to post a status.")
 
+        now = datetime.datetime.now().isoformat(timespec='seconds')
         for s in self:
             if not s.target.project_id.staging_rpc:
                 continue
@@ -2100,6 +2190,7 @@ class Stagings(models.Model):
                 'state': status,
                 'target_url': target_url,
                 'description': description,
+                'updated_at': now,
             }
             s.statuses_cache = json.dumps(st)
 
@@ -2113,40 +2204,45 @@ class Stagings(models.Model):
         "heads.repository_id.status_ids.context",
     )
     def _compute_state(self):
-        for s in self:
-            if s.state != 'pending':
+        for staging in self:
+            if staging.state != 'pending':
                 continue
 
             # maps commits to the statuses they need
             required_statuses = [
-                (h.commit_id.sha, h.repository_id.status_ids._for_staging(s).mapped('context'))
-                for h in s.heads
+                (h.commit_id.sha, h.repository_id.status_ids._for_staging(staging).mapped('context'))
+                for h in staging.heads
             ]
-            cmap = json.loads(s.statuses_cache)
+            cmap = json.loads(staging.statuses_cache)
 
-            update_timeout_limit = False
-            st = 'success'
+            last_pending = ""
+            state = 'success'
             for head, reqs in required_statuses:
                 statuses = cmap.get(head) or {}
-                for v in map(lambda n: statuses.get(n, {}).get('state'), reqs):
-                    if st == 'failure' or v in ('error', 'failure'):
-                        st = 'failure'
+                for status in (statuses.get(n, {}) for n in reqs):
+                    v = status.get('state')
+                    if state == 'failure' or v in ('error', 'failure'):
+                        state = 'failure'
                     elif v is None:
-                        st = 'pending'
+                        state = 'pending'
                     elif v == 'pending':
-                        st = 'pending'
-                        update_timeout_limit = True
+                        state = 'pending'
+                        last_pending = max(last_pending, status.get('updated_at', ''))
                     else:
                         assert v == 'success'
 
-            s.state = st
-            if s.state != 'pending':
+            staging.state = state
+            if staging.state != 'pending':
                 self.env.ref("runbot_merge.merge_cron")._trigger()
-                s.staging_end = fields.Datetime.now()
-            if update_timeout_limit:
-                s.timeout_limit = datetime.datetime.now() + datetime.timedelta(minutes=s.target.project_id.ci_timeout)
-                self.env.ref("runbot_merge.merge_cron")._trigger(s.timeout_limit)
-                _logger.debug("%s got pending status, bumping timeout to %s (%s)", self, s.timeout_limit, cmap)
+            if last_pending:
+                timeout = datetime.datetime.fromisoformat(last_pending) \
+                    + datetime.timedelta(minutes=staging.target.project_id.ci_timeout)
+
+                if timeout > staging.timeout_limit:
+                    staging.timeout_limit = timeout
+                    self.env.ref("runbot_merge.merge_cron")._trigger(timeout)
+                    _logger.debug("%s got pending status, bumping timeout to %s", staging, timeout)
 
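The rewrite anchors the CI timeout to the `updated_at` of the most recent pending status rather than to "now". Since those timestamps are ISO-8601 strings produced in one fixed format (`isoformat(timespec='seconds')` above), lexicographic `max` selects the chronologically latest one, and `datetime.fromisoformat` turns it back into a datetime. A minimal sketch under those assumptions, with invented sample values:

    import datetime

    pendings = ["2024-05-02T10:00:00", "2024-05-02T10:07:42"]  # sample 'updated_at' values
    last_pending = max(pendings)  # same-format ISO strings sort chronologically
    timeout = (
        datetime.datetime.fromisoformat(last_pending)
        + datetime.timedelta(minutes=60)  # stand-in for ci_timeout
    )
    # the compute only ever extends the deadline, never shortens it:
    # if timeout > staging.timeout_limit: staging.timeout_limit = timeout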
     def action_cancel(self):
         w = self.env['runbot_merge.stagings.cancel'].create({
@@ -2297,7 +2393,7 @@ class Stagings(models.Model):
         prs._track_set_log_message(f'staging {self.id} succeeded')
         logger.info(
             "%s FF successful, marking %s as merged",
-            self, prs
+            self, prs.mapped('display_name'),
         )
         self.batch_ids.merge_date = fields.Datetime.now()
 
@@ -2423,31 +2519,47 @@ class FetchJob(models.Model):
     repository = fields.Many2one('runbot_merge.repository', required=True)
     number = fields.Integer(required=True, group_operator=None)
     closing = fields.Boolean(default=False)
+    commits_at = fields.Datetime(index="btree_not_null")
+    commenter = fields.Char()
 
     @api.model_create_multi
     def create(self, vals_list):
-        self.env.ref('runbot_merge.fetch_prs_cron')._trigger()
+        now = fields.Datetime.now()
+        self.env.ref('runbot_merge.fetch_prs_cron')._trigger({
+            fields.Datetime.to_datetime(
+                vs.get('commits_at') or now
+            )
+            for vs in vals_list
+        })
         return super().create(vals_list)
 
     def _check(self, commit=False):
         """
        :param bool commit: commit after each fetch has been executed
         """
+        now = getattr(builtins, 'current_date', None) or fields.Datetime.to_string(datetime.datetime.now())
         while True:
-            f = self.search([], limit=1)
+            f = self.search([
+                '|', ('commits_at', '=', False), ('commits_at', '<=', now)
+            ], limit=1)
             if not f:
                 return
 
+            f.active = False
             self.env.cr.execute("SAVEPOINT runbot_merge_before_fetch")
             try:
-                f.repository._load_pr(f.number, closing=f.closing)
+                f.repository._load_pr(
+                    f.number,
+                    closing=f.closing,
+                    squash=bool(f.commits_at),
+                    ping=f.commenter and f'@{f.commenter} ',
+                )
             except Exception:
                 self.env.cr.execute("ROLLBACK TO SAVEPOINT runbot_merge_before_fetch")
                 _logger.exception("Failed to load pr %s, skipping it", f.number)
             finally:
                 self.env.cr.execute("RELEASE SAVEPOINT runbot_merge_before_fetch")
 
-            f.active = False
             if commit:
                 self.env.cr.commit()
 
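`create` now hands the cron trigger a set of datetimes, one per job, falling back to "now", so a deferred fetch schedules a wakeup at its `commits_at` instead of firing immediately. A hedged sketch of just that computation; `cron` stands in for `self.env.ref('runbot_merge.fetch_prs_cron')` and the sample jobs are invented:

    from datetime import datetime, timedelta

    now = datetime.now()
    vals_list = [
        {"number": 101},                                            # fetch as soon as possible
        {"number": 102, "commits_at": now + timedelta(minutes=5)},  # deferred fetch
    ]
    wakeups = {vs.get("commits_at") or now for vs in vals_list}
    # cron._trigger(wakeups)  # one scheduled run per distinct wakeup time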
|
@ -195,11 +195,15 @@ def ready_batches(for_branch: Branch) -> Tuple[bool, Batch]:
|
|||||||
# staged through priority *and* through split.
|
# staged through priority *and* through split.
|
||||||
split_ids = for_branch.split_ids.batch_ids.ids
|
split_ids = for_branch.split_ids.batch_ids.ids
|
||||||
env.cr.execute("""
|
env.cr.execute("""
|
||||||
SELECT max(priority)
|
SELECT exists (
|
||||||
FROM runbot_merge_batch
|
SELECT FROM runbot_merge_batch
|
||||||
WHERE blocked IS NULL AND target = %s AND NOT id = any(%s)
|
WHERE priority = 'alone'
|
||||||
|
AND blocked IS NULL
|
||||||
|
AND target = %s
|
||||||
|
AND NOT id = any(%s)
|
||||||
|
)
|
||||||
""", [for_branch.id, split_ids])
|
""", [for_branch.id, split_ids])
|
||||||
alone = env.cr.fetchone()[0] == 'alone'
|
[alone] = env.cr.fetchone()
|
||||||
|
|
||||||
return (
|
return (
|
||||||
alone,
|
alone,
|
||||||
@ -208,7 +212,7 @@ def ready_batches(for_branch: Branch) -> Tuple[bool, Batch]:
|
|||||||
('blocked', '=', False),
|
('blocked', '=', False),
|
||||||
('priority', '=', 'alone') if alone else (1, '=', 1),
|
('priority', '=', 'alone') if alone else (1, '=', 1),
|
||||||
('id', 'not in', split_ids),
|
('id', 'not in', split_ids),
|
||||||
], order="priority DESC, id ASC"),
|
], order="priority DESC, write_date ASC, id ASC"),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
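The old query fetched `max(priority)` and compared the result to `'alone'` in Python; the new one asks PostgreSQL directly whether any unblocked, ready batch on the target has priority `alone`, which always yields exactly one boolean row. A standalone illustration of the `EXISTS` form; `cr` is assumed to be an open psycopg2 cursor and the bind values are invented:

    cr.execute("""
        SELECT exists (
            SELECT FROM runbot_merge_batch
            WHERE priority = 'alone'
              AND blocked IS NULL
              AND target = %s
              AND NOT id = any(%s)
        )
    """, [branch_id, split_ids])
    [alone] = cr.fetchone()          # exists() returns a single boolean column
    assert isinstance(alone, bool)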
@@ -240,7 +244,8 @@ def staging_setup(
             # be hooked only to "proper" remote-tracking branches
             # (in `refs/remotes`), it doesn't seem to work here
             f'+refs/heads/{target.name}:refs/heads/{target.name}',
-            *(pr.head for pr in by_repo.get(repo, []))
+            *(pr.head for pr in by_repo.get(repo, [])),
+            no_tags=True,
         )
         original_heads[repo] = head
         staging_state[repo] = StagingSlice(gh=gh, head=head, repo=source.stdout().with_config(text=True, check=False))
@@ -257,6 +262,10 @@ def stage_batches(branch: Branch, batches: Batch, staging_state: StagingState) -
             break
         try:
             staged |= stage_batch(env, batch, staging_state)
+        except exceptions.Skip:
+            # skip silently because the PR will be retried on every staging
+            # which is going to be ultra spammy
+            pass
         except exceptions.MergeError as e:
             pr = e.args[0]
             _logger.info("Failed to stage %s into %s", pr.display_name, branch.name)
@@ -414,9 +423,19 @@ def stage(pr: PullRequests, info: StagingSlice, related_prs: PullRequests) -> Tu
         invalid['target'] = branch.id
         diff.append(('Target branch', pr.target.name, branch.name))
 
-    if pr.squash != commits == 1:
-        invalid['squash'] = commits == 1
-        diff.append(('Single commit', pr.squash, commits == 1))
+    if not method:
+        if not pr.method_warned:
+            pr.env.ref('runbot_merge.pr.merge_method')._send(
+                repository=pr.repository,
+                pull_request=pr.number,
+                format_args={'pr': pr, 'methods': ''.join(
+                    '* `%s` to %s\n' % pair
+                    for pair in type(pr).merge_method.selection
+                    if pair[0] != 'squash'
+                )},
+            )
+            pr.method_warned = True
+        raise exceptions.Skip()
 
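When a PR reaches staging without a merge method, the bot now notifies the author once (guarded by `method_warned`) and raises `exceptions.Skip`, which `stage_batches` above swallows so the batch is quietly retried on later stagings. A hedged sketch of the warn-once plus control-flow-exception pattern; names other than `Skip` are illustrative:

    class Skip(Exception):
        """Batch cannot be staged yet; retry on a later staging run."""

    def stage_one(pr, notify):
        if not pr.merge_method:
            if not pr.method_warned:
                notify(pr)             # ping the author exactly once
                pr.method_warned = True
            raise Skip()               # the caller catches this and moves on silently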
     msg = utils.make_message(prdict)
     if pr.message != msg:
@@ -29,6 +29,7 @@ def dfm(repository: str, text: str) -> Markup:
     """
     t = DFM_CONTEXT_REPO.set(repository)
     try:
+        dfm_renderer.reset()
         return Markup(dfm_renderer.convert(escape(text)))
     finally:
         DFM_CONTEXT_REPO.reset(t)
||||||
|
@ -30,5 +30,6 @@ access_runbot_merge_pull_requests,User access to PR,model_runbot_merge_pull_requ
|
|||||||
access_runbot_merge_pull_requests_feedback,Users have no reason to access feedback,model_runbot_merge_pull_requests_feedback,,0,0,0,0
|
access_runbot_merge_pull_requests_feedback,Users have no reason to access feedback,model_runbot_merge_pull_requests_feedback,,0,0,0,0
|
||||||
access_runbot_merge_review_rights_2,Users can see partners,model_res_partner_review,base.group_user,1,0,0,0
|
access_runbot_merge_review_rights_2,Users can see partners,model_res_partner_review,base.group_user,1,0,0,0
|
||||||
access_runbot_merge_review_override_2,Users can see partners,model_res_partner_override,base.group_user,1,0,0,0
|
access_runbot_merge_review_override_2,Users can see partners,model_res_partner_override,base.group_user,1,0,0,0
|
||||||
runbot_merge.access_runbot_merge_pull_requests_feedback_template,access_runbot_merge_pull_requests_feedback_template,runbot_merge.model_runbot_merge_pull_requests_feedback_template,base.group_system,1,1,0,0
|
access_runbot_merge_pull_requests_feedback_template,access_runbot_merge_pull_requests_feedback_template,runbot_merge.model_runbot_merge_pull_requests_feedback_template,base.group_system,1,1,0,0
|
||||||
|
access_runbot_merge_patch,Patcher access,runbot_merge.model_runbot_merge_patch,runbot_merge.group_patcher,1,1,1,0
|
||||||
|
access_runbot_merge_backport_admin,Admin access to backport wizard,model_runbot_merge_pull_requests_backport,runbot_merge.group_admin,1,1,1,0
|
||||||
|
|
@ -1,9 +1,15 @@
|
|||||||
<odoo>
|
<odoo>
|
||||||
|
<record model="res.groups" id="group_patcher">
|
||||||
|
<field name="name">Mergebot Patcher</field>
|
||||||
|
</record>
|
||||||
<record model="res.groups" id="group_admin">
|
<record model="res.groups" id="group_admin">
|
||||||
<field name="name">Mergebot Administrator</field>
|
<field name="name">Mergebot Administrator</field>
|
||||||
</record>
|
</record>
|
||||||
<record model="res.groups" id="base.group_system">
|
<record model="res.groups" id="base.group_system">
|
||||||
<field name="implied_ids" eval="[(4, ref('runbot_merge.group_admin'))]"/>
|
<field name="implied_ids" eval="[
|
||||||
|
(4, ref('runbot_merge.group_admin')),
|
||||||
|
(4, ref('runbot_merge.group_patcher')),
|
||||||
|
]"/>
|
||||||
</record>
|
</record>
|
||||||
<record model="res.groups" id="status">
|
<record model="res.groups" id="status">
|
||||||
<field name="name">Mergebot Status Sender</field>
|
<field name="name">Mergebot Status Sender</field>
|
||||||
|
@ -11,7 +11,8 @@ import requests
|
|||||||
from lxml import html
|
from lxml import html
|
||||||
|
|
||||||
import odoo
|
import odoo
|
||||||
from utils import _simple_init, seen, matches, get_partner, Commit, pr_page, to_pr, part_of, ensure_one
|
from utils import _simple_init, seen, matches, get_partner, Commit, pr_page, to_pr, part_of, ensure_one, read_tracking_value
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(autouse=True)
|
@pytest.fixture(autouse=True)
|
||||||
def _configure_statuses(request, project, repo):
|
def _configure_statuses(request, project, repo):
|
||||||
@ -172,28 +173,29 @@ def test_trivial_flow(env, repo, page, users, config):
|
|||||||
|
|
||||||
# reverse because the messages are in newest-to-oldest by default
|
# reverse because the messages are in newest-to-oldest by default
|
||||||
# (as that's how you want to read them)
|
# (as that's how you want to read them)
|
||||||
messages = reversed([
|
messages = pr_id.message_ids[::-1].mapped(lambda m: (
|
||||||
(m.author_id.display_name, m.body, [get_tracking_values(v) for v in m.tracking_value_ids])
|
m.author_id.display_name,
|
||||||
for m in pr_id.message_ids
|
m.body,
|
||||||
])
|
list(map(read_tracking_value, m.tracking_value_ids)),
|
||||||
|
))
|
||||||
|
|
||||||
assert list(messages) == [
|
assert list(messages) == [
|
||||||
(users['user'], '<p>Pull Request created</p>', []),
|
(users['user'], '<p>Pull Request created</p>', []),
|
||||||
(users['user'], '', [(c1, c2)]),
|
(users['user'], '', [('head', c1, c2)]),
|
||||||
('OdooBot', f'<p>statuses changed on {c2}</p>', [('Opened', 'Validated')]),
|
('OdooBot', f'<p>statuses changed on {c2}</p>', [('state', 'Opened', 'Validated')]),
|
||||||
# reviewer approved changing the state and setting reviewer as reviewer
|
# reviewer approved changing the state and setting reviewer as reviewer
|
||||||
# plus set merge method
|
# plus set merge method
|
||||||
('Reviewer', '', [
|
('Reviewer', '', [
|
||||||
('', 'rebase and merge, using the PR as merge commit message'),
|
('state', 'Validated', 'Ready'),
|
||||||
('', 'Reviewer'),
|
('merge_method', '', 'rebase and merge, using the PR as merge commit message'),
|
||||||
('Validated', 'Ready'),
|
('reviewed_by', '', 'Reviewer'),
|
||||||
]),
|
]),
|
||||||
# staging succeeded
|
# staging succeeded
|
||||||
(matches('$$'), f'<p>staging {st.id} succeeded</p>', [
|
(matches('$$'), f'<p>staging {st.id} succeeded</p>', [
|
||||||
# set merge date
|
# set merge date
|
||||||
(False, pr_id.merge_date),
|
('merge_date', False, pr_id.merge_date),
|
||||||
# updated state
|
# updated state
|
||||||
('Ready', 'Merged'),
|
('state', 'Ready', 'Merged'),
|
||||||
]),
|
]),
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -617,7 +619,6 @@ def test_staging_ci_timeout(env, repo, config, page, update_op: Callable[[int],
     assert dangerbox
     assert dangerbox[0].text == 'timed out (>60 minutes)'
 
-@pytest.mark.defaultstatuses
 def test_timeout_bump_on_pending(env, repo, config):
     with repo:
         [m, c] = repo.make_commits(
@@ -627,8 +628,9 @@ def test_timeout_bump_on_pending(env, repo, config):
         )
         repo.make_ref('heads/master', m)
 
-        prx = repo.make_pr(title='title', body='body', target='master', head=c)
-        repo.post_status(prx.head, 'success')
+        prx = repo.make_pr(target='master', head=c)
+        repo.post_status(prx.head, 'success', 'ci/runbot')
+        repo.post_status(prx.head, 'success', 'legal/cla')
         prx.post_comment('hansen r+', config['role_reviewer']['token'])
     env.run_crons()
 
@@ -636,9 +638,18 @@ def test_timeout_bump_on_pending(env, repo, config):
     old_timeout = odoo.fields.Datetime.to_string(datetime.datetime.now() - datetime.timedelta(days=15))
     st.timeout_limit = old_timeout
     with repo:
-        repo.post_status('staging.master', 'pending')
+        repo.post_status('staging.master', 'pending', 'ci/runbot')
    env.run_crons(None)
-    assert st.timeout_limit > old_timeout
+    assert st.timeout_limit > old_timeout, "receiving a pending status should bump the timeout"
+
+    st.timeout_limit = old_timeout
+    # clear the statuses cache to remove the memoized status times
+    st.statuses_cache = "{}"
+    st.commit_ids.statuses = "{}"
+    with repo:
+        repo.post_status('staging.master', 'success', 'legal/cla')
+    env.run_crons(None)
+    assert st.timeout_limit == old_timeout, "receiving a success status should *not* bump the timeout"
 
 def test_staging_ci_failure_single(env, repo, users, config, page):
     """ on failure of single-PR staging, mark & notify failure
@@ -2203,83 +2214,53 @@ Co-authored-by: {other_user['name']} <{other_user['email']}>\
     }
 
 
-class TestPRUpdate(object):
+class TestPRUpdate:
     """ Pushing on a PR should update the HEAD except for merged PRs, it
     can have additional effect (see individual tests)
     """
+    @pytest.fixture(autouse=True)
+    def master(self, repo):
+        with repo:
+            [m] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), ref="heads/master")
+        return m
+
     def test_update_opened(self, env, repo):
         with repo:
-            m = repo.make_commit(None, 'initial', None, tree={'m': 'm'})
-            repo.make_ref('heads/master', m)
-
-            c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'})
-            prx = repo.make_pr(title='title', body='body', target='master', head=c)
+            [c] = repo.make_commits("master", Commit('first', tree={'m': 'c1'}))
+            prx = repo.make_pr(target='master', head=c)
 
         pr = to_pr(env, prx)
         assert pr.head == c
         # alter & push force PR entirely
         with repo:
-            c2 = repo.make_commit(m, 'first', None, tree={'m': 'cc'})
-            repo.update_ref(prx.ref, c2, force=True)
-        assert pr.head == c2
-
-    def test_reopen_update(self, env, repo, config):
-        with repo:
-            m = repo.make_commit(None, 'initial', None, tree={'m': 'm'})
-            repo.make_ref('heads/master', m)
-
-            c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'})
-            prx = repo.make_pr(title='title', body='body', target='master', head=c)
-            prx.post_comment("hansen r+", config['role_reviewer']['token'])
-
-        pr = to_pr(env, prx)
-        assert pr.state == 'approved'
-        assert pr.reviewed_by
-        with repo:
-            prx.close()
-        assert pr.state == 'closed'
-        assert pr.head == c
-        assert not pr.reviewed_by
-
-        with repo:
-            prx.open()
-        assert pr.state == 'opened'
-        assert not pr.reviewed_by
-
-        with repo:
-            c2 = repo.make_commit(c, 'first', None, tree={'m': 'cc'})
+            [c2] = repo.make_commits("master", Commit('first', tree={'m': 'cc'}))
             repo.update_ref(prx.ref, c2, force=True)
         assert pr.head == c2
 
+    @pytest.mark.defaultstatuses
     def test_update_validated(self, env, repo):
         """ Should reset to opened
         """
         with repo:
-            m = repo.make_commit(None, 'initial', None, tree={'m': 'm'})
-            repo.make_ref('heads/master', m)
-            c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'})
-            prx = repo.make_pr(title='title', body='body', target='master', head=c)
-            repo.post_status(prx.head, 'success', 'legal/cla')
-            repo.post_status(prx.head, 'success', 'ci/runbot')
+            [c] = repo.make_commits("master", Commit('first', tree={'m': 'c1'}))
+            pr = repo.make_pr(target='master', head=c)
+            repo.post_status(c, 'success')
         env.run_crons()
-        pr = to_pr(env, prx)
-        assert pr.head == c
-        assert pr.state == 'validated'
+
+        pr_id = to_pr(env, pr)
+        assert pr_id.head == c
+        assert pr_id.state == 'validated'
 
         with repo:
-            c2 = repo.make_commit(m, 'first', None, tree={'m': 'cc'})
-            repo.update_ref(prx.ref, c2, force=True)
-        assert pr.head == c2
-        assert pr.state == 'opened'
+            [c2] = repo.make_commits("master", Commit('first', tree={'m': 'cc'}))
+            repo.update_ref(pr.ref, c2, force=True)
+        assert pr_id.head == c2
+        assert pr_id.state == 'opened'
 
     def test_update_approved(self, env, repo, config):
         with repo:
-            m = repo.make_commit(None, 'initial', None, tree={'m': 'm'})
-            repo.make_ref('heads/master', m)
-
-            c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'})
-            prx = repo.make_pr(title='title', body='body', target='master', head=c)
+            [c] = repo.make_commits("master", Commit('fist', tree={'m': 'c1'}))
+            prx = repo.make_pr(target='master', head=c)
             prx.post_comment('hansen r+', config['role_reviewer']['token'])
 
         pr = to_pr(env, prx)
@@ -2287,22 +2268,19 @@ class TestPRUpdate(object):
         assert pr.state == 'approved'
 
         with repo:
-            c2 = repo.make_commit(c, 'first', None, tree={'m': 'cc'})
+            [c2] = repo.make_commits("master", Commit('first', tree={'m': 'cc'}))
             repo.update_ref(prx.ref, c2, force=True)
         assert pr.head == c2
         assert pr.state == 'opened'
 
+    @pytest.mark.defaultstatuses
     def test_update_ready(self, env, repo, config):
         """ Should reset to opened
         """
         with repo:
-            m = repo.make_commit(None, 'initial', None, tree={'m': 'm'})
-            repo.make_ref('heads/master', m)
-
-            c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'})
-            prx = repo.make_pr(title='title', body='body', target='master', head=c)
-            repo.post_status(prx.head, 'success', 'legal/cla')
-            repo.post_status(prx.head, 'success', 'ci/runbot')
+            [c] = repo.make_commits("master", Commit('fist', tree={'m': 'c1'}))
+            prx = repo.make_pr(target='master', head=c)
+            repo.post_status(prx.head, 'success')
             prx.post_comment('hansen r+', config['role_reviewer']['token'])
         env.run_crons()
         pr = to_pr(env, prx)
@@ -2310,22 +2288,19 @@ class TestPRUpdate(object):
         assert pr.state == 'ready'
 
         with repo:
-            c2 = repo.make_commit(c, 'first', None, tree={'m': 'cc'})
+            [c2] = repo.make_commits(c, Commit('first', tree={'m': 'cc'}))
             repo.update_ref(prx.ref, c2, force=True)
         assert pr.head == c2
         assert pr.state == 'opened'
 
+    @pytest.mark.defaultstatuses
     def test_update_staged(self, env, repo, config):
         """ Should cancel the staging & reset PR to opened
         """
         with repo:
-            m = repo.make_commit(None, 'initial', None, tree={'m': 'm'})
-            repo.make_ref('heads/master', m)
-
-            c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'})
-            prx = repo.make_pr(title='title', body='body', target='master', head=c)
-            repo.post_status(prx.head, 'success', 'legal/cla')
-            repo.post_status(prx.head, 'success', 'ci/runbot')
+            [c] = repo.make_commits("master", Commit('fist', tree={'m': 'c1'}))
+            prx = repo.make_pr(target='master', head=c)
+            repo.post_status(prx.head, 'success')
             prx.post_comment('hansen r+', config['role_reviewer']['token'])
 
         env.run_crons()
@@ -2334,33 +2309,27 @@ class TestPRUpdate(object):
         assert pr.staging_id
 
         with repo:
-            c2 = repo.make_commit(c, 'first', None, tree={'m': 'cc'})
+            [c2] = repo.make_commits(c, Commit('first', tree={'m': 'cc'}))
             repo.update_ref(prx.ref, c2, force=True)
         assert pr.head == c2
         assert pr.state == 'opened'
         assert not pr.staging_id
         assert not env['runbot_merge.stagings'].search([])
 
+    @pytest.mark.defaultstatuses
     def test_split(self, env, repo, config):
         """ Should remove the PR from its split, and possibly delete the split
         entirely.
         """
         with repo:
-            m = repo.make_commit(None, 'initial', None, tree={'m': 'm'})
-            repo.make_ref('heads/master', m)
-
-            c = repo.make_commit(m, 'first', None, tree={'m': 'm', '1': '1'})
-            repo.make_ref('heads/p1', c)
-            prx1 = repo.make_pr(title='t1', body='b1', target='master', head='p1')
-            repo.post_status(prx1.head, 'success', 'legal/cla')
-            repo.post_status(prx1.head, 'success', 'ci/runbot')
+            repo.make_commits("master", Commit('first', tree={'1': '1'}), ref="heads/p1")
+            prx1 = repo.make_pr(target='master', head='p1')
+            repo.post_status(prx1.head, 'success')
             prx1.post_comment('hansen r+', config['role_reviewer']['token'])
 
-            c = repo.make_commit(m, 'first', None, tree={'m': 'm', '2': '2'})
-            repo.make_ref('heads/p2', c)
-            prx2 = repo.make_pr(title='t2', body='b2', target='master', head='p2')
-            repo.post_status(prx2.head, 'success', 'legal/cla')
-            repo.post_status(prx2.head, 'success', 'ci/runbot')
+            [c] = repo.make_commits("master", Commit('first', tree={'2': '2'}), ref="heads/p2")
+            prx2 = repo.make_pr(target='master', head='p2')
+            repo.post_status(prx2.head, 'success')
             prx2.post_comment('hansen r+', config['role_reviewer']['token'])
         env.run_crons()
 
@@ -2371,7 +2340,7 @@ class TestPRUpdate(object):
         s0 = pr1.staging_id
 
         with repo:
-            repo.post_status('heads/staging.master', 'failure', 'ci/runbot')
+            repo.post_status('heads/staging.master', 'failure')
         env.run_crons()
 
         assert pr1.staging_id and pr1.staging_id != s0, "pr1 should have been re-staged"
@@ -2381,22 +2350,20 @@ class TestPRUpdate(object):
         assert env['runbot_merge.split'].search([])
 
         with repo:
-            repo.update_ref(prx2.ref, repo.make_commit(c, 'second', None, tree={'m': 'm', '2': '22'}), force=True)
+            [c2] = repo.make_commits(c, Commit('second', tree={'2': '22'}))
+            repo.update_ref(prx2.ref, c2, force=True)
         # probably not necessary ATM but...
         env.run_crons()
 
         assert pr2.state == 'opened', "state should have been reset"
         assert not env['runbot_merge.split'].search([]), "there should be no split left"
 
+    @pytest.mark.defaultstatuses
     def test_update_error(self, env, repo, config):
         with repo:
-            m = repo.make_commit(None, 'initial', None, tree={'m': 'm'})
-            repo.make_ref('heads/master', m)
-
-            c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'})
-            prx = repo.make_pr(title='title', body='body', target='master', head=c)
-            repo.post_status(prx.head, 'success', 'legal/cla')
-            repo.post_status(prx.head, 'success', 'ci/runbot')
+            [c] = repo.make_commits("master", Commit('fist', tree={'m': 'c1'}))
+            prx = repo.make_pr(target='master', head=c)
+            repo.post_status(prx.head, 'success')
             prx.post_comment('hansen r+', config['role_reviewer']['token'])
         env.run_crons()
         pr = to_pr(env, prx)
@@ -2404,24 +2371,25 @@ class TestPRUpdate(object):
         assert pr.staging_id
 
         with repo:
-            repo.post_status('staging.master', 'success', 'legal/cla')
-            repo.post_status('staging.master', 'failure', 'ci/runbot')
+            repo.post_status('staging.master', 'failure')
         env.run_crons()
         assert not pr.staging_id
         assert pr.state == 'error'
 
         with repo:
-            c2 = repo.make_commit(c, 'first', None, tree={'m': 'cc'})
+            [c2] = repo.make_commits(c, Commit('first', tree={'m': 'cc'}))
            repo.update_ref(prx.ref, c2, force=True)
         assert pr.head == c2
         assert pr.state == 'opened'
 
     def test_unknown_pr(self, env, repo):
         with repo:
-            m = repo.make_commit(None, 'initial', None, tree={'m': 'm'})
+            [m, c] = repo.make_commits(
+                None,
+                Commit('initial', tree={'m': 'm'}),
+                Commit('first', tree={'m': 'c1'}),
+            )
             repo.make_ref('heads/1.0', m)
 
-            c = repo.make_commit(m, 'first', None, tree={'m': 'c1'})
             prx = repo.make_pr(title='title', body='body', target='1.0', head=c)
         assert not env['runbot_merge.pull_requests'].search([('number', '=', prx.number)])
 
@@ -2430,27 +2398,24 @@ class TestPRUpdate(object):
         })
 
         with repo:
-            c2 = repo.make_commit(c, 'second', None, tree={'m': 'c2'})
+            [c2] = repo.make_commits(c, Commit('second', tree={'m': 'c2'}))
             repo.update_ref(prx.ref, c2, force=True)
 
         assert not env['runbot_merge.pull_requests'].search([('number', '=', prx.number)])
 
+    @pytest.mark.defaultstatuses
     def test_update_to_ci(self, env, repo):
         """ If a PR is updated to a known-valid commit, it should be
         validated
         """
         with repo:
-            m = repo.make_commit(None, 'initial', None, tree={'m': 'm'})
-            repo.make_ref('heads/master', m)
-
-            c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'})
-            c2 = repo.make_commit(m, 'first', None, tree={'m': 'cc'})
-            repo.post_status(c2, 'success', 'legal/cla')
-            repo.post_status(c2, 'success', 'ci/runbot')
+            [c] = repo.make_commits("master", Commit('fist', tree={'m': 'c1'}))
+            [c2] = repo.make_commits("master", Commit('first', tree={'m': 'cc'}))
+            repo.post_status(c2, 'success')
         env.run_crons()
 
         with repo:
-            prx = repo.make_pr(title='title', body='body', target='master', head=c)
+            prx = repo.make_pr(target='master', head=c)
         pr = to_pr(env, prx)
         assert pr.head == c
         assert pr.state == 'opened'
@@ -2460,6 +2425,7 @@ class TestPRUpdate(object):
         assert pr.head == c2
         assert pr.state == 'validated'
 
+    @pytest.mark.defaultstatuses
     def test_update_missed(self, env, repo, config, users):
         """ Sometimes github's webhooks don't trigger properly, a branch's HEAD
         does not get updated and we might e.g. attempt to merge a PR despite it
@@ -2479,10 +2445,9 @@ class TestPRUpdate(object):
         repo.make_ref('heads/somethingelse', c)
 
         [c] = repo.make_commits(
-            'heads/master', repo.Commit('title \n\nbody', tree={'a': '1'}), ref='heads/abranch')
+            'master', repo.Commit('title \n\nbody', tree={'a': '1'}), ref='heads/abranch')
         pr = repo.make_pr(target='master', head='abranch')
-        repo.post_status(pr.head, 'success', 'legal/cla')
-        repo.post_status(pr.head, 'success', 'ci/runbot')
+        repo.post_status(pr.head, 'success')
         pr.post_comment('hansen r+', config['role_reviewer']['token'])
 
         env.run_crons()
@@ -2498,8 +2463,7 @@ class TestPRUpdate(object):
        # to the PR *actually* having more than 1 commit and thus needing
|
# to the PR *actually* having more than 1 commit and thus needing
|
||||||
# a configuration
|
# a configuration
|
||||||
[c2] = repo.make_commits('heads/master', repo.Commit('c2', tree={'a': '2'}))
|
[c2] = repo.make_commits('heads/master', repo.Commit('c2', tree={'a': '2'}))
|
||||||
repo.post_status(c2, 'success', 'legal/cla')
|
repo.post_status(c2, 'success')
|
||||||
repo.post_status(c2, 'success', 'ci/runbot')
|
|
||||||
repo.update_ref(pr.ref, c2, force=True)
|
repo.update_ref(pr.ref, c2, force=True)
|
||||||
|
|
||||||
other = env['runbot_merge.branch'].create({
|
other = env['runbot_merge.branch'].create({
|
||||||
@@ -2608,61 +2572,114 @@ Please check and re-approve.
             f"Updated target, squash, message. Updated {pr_id.display_name} to ready. Updated to {c2}."
         )
 
-    def test_update_closed(self, env, repo):
+    @pytest.mark.defaultstatuses
+    def test_update_closed(self, env, repo, config):
         with repo:
-            [m] = repo.make_commits(None, repo.Commit('initial', tree={'m': 'm'}), ref='heads/master')
-            [c] = repo.make_commits(m, repo.Commit('first', tree={'m': 'm3'}), ref='heads/abranch')
-            prx = repo.make_pr(title='title', body='body', target='master', head=c)
+            [c] = repo.make_commits("master", repo.Commit('first', tree={'m': 'm3'}), ref='heads/abranch')
+            pr = repo.make_pr(target='master', head=c)
+            pr.post_comment("hansen r+", config['role_reviewer']['token'])
         env.run_crons()
-        pr = to_pr(env, prx)
-        assert pr.state == 'opened'
-        assert pr.head == c
-        assert pr.squash
+
+        pr_id = to_pr(env, pr)
+        assert pr_id.state == 'approved'
+        assert pr_id.head == c
+        assert pr_id.squash
+        assert pr_id.reviewed_by
 
         with repo:
-            prx.close()
+            pr.close()
 
-        with repo:
-            c2 = repo.make_commit(c, 'xxx', None, tree={'m': 'm4'})
-            repo.update_ref(prx.ref, c2)
-
         assert pr.state == 'closed'
         assert pr.head == c
-        assert pr.squash
+        assert not pr_id.reviewed_by
+        assert pr_id.squash
 
         with repo:
-            prx.open()
-        assert pr.state == 'opened'
-        assert pr.head == c2
-        assert not pr.squash
+            [c2] = repo.make_commits(c, Commit('xxx', tree={'m': 'm4'}))
+            repo.update_ref(pr.ref, c2)
+            repo.post_status(c2, "success")
 
-    def test_update_closed_revalidate(self, env, repo):
-        """ The PR should be validated on opening and reopening in case there's
-        already a CI+ stored (as the CI might never trigger unless explicitly
-        re-requested)
+        assert pr_id.state == 'closed'
+        assert pr_id.head == c
+        assert not pr_id.reviewed_by
+        assert pr_id.squash
+
+        with repo:
+            pr.open()
+        assert pr_id.state == 'validated'
+        assert pr_id.head == c2
+        assert not pr_id.reviewed_by
+        assert not pr_id.squash
+
+    @pytest.mark.defaultstatuses
+    def test_update_incorrect_commits_count(self, port, env, project, repo, config, users):
+        """This is not a great test but it aims to kinda sorta simulate the
+        behaviour when a user retargets and updates a PR at about the same time:
+        github can send the hooks in the wrong order, which leads to the correct
+        base and head but can lead to the wrong squash status.
         """
+        project.write({
+            'branch_ids': [(0, 0, {
+                'name': 'xxx',
+            })]
+        })
         with repo:
-            m = repo.make_commit(None, 'initial', None, tree={'m': 'm'})
-            repo.make_ref('heads/master', m)
+            [c] = repo.make_commits("master", Commit("c", tree={"m": "n"}), ref="heads/thing")
+            pr = repo.make_pr(target='master', head='thing')
 
-            c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'})
-            repo.post_status(c, 'success', 'legal/cla')
-            repo.post_status(c, 'success', 'ci/runbot')
-            prx = repo.make_pr(title='title', body='body', target='master', head=c)
+        pr_id = to_pr(env, pr)
+        pr_id.head = '0'*40
+        with requests.Session() as s:
+            r = s.post(
+                f"http://localhost:{port}/runbot_merge/hooks",
+                headers={
+                    "X-Github-Event": "pull_request",
+                },
+                json={
+                    'action': 'synchronize',
+                    'sender': {
+                        'login': users['user'],
+                    },
+                    'repository': {
+                        'full_name': repo.name,
+                    },
+                    'pull_request': {
+                        'number': pr.number,
+                        'head': {'sha': c},
+                        'title': "c",
+                        'commits': 40123,
+                        'base': {
+                            'ref': 'xxx',
+                            'repo': {
+                                'full_name': repo.name,
+                            },
+                        }
+                    }
+                }
+            )
+        r.raise_for_status()
+        assert pr_id.head == c, "the head should have been updated"
+        assert not pr_id.squash, "the wrong count should be used"
+
+        with repo:
+            pr.post_comment("hansen r+", config['role_reviewer']['token'])
+            repo.post_status(c, 'success')
         env.run_crons()
-        pr = to_pr(env, prx)
-        assert pr.state == 'validated', \
-            "if a PR is created on a CI'd commit, it should be validated immediately"
-
-        with repo: prx.close()
-        assert pr.state == 'closed'
-
-        with repo: prx.open()
-        assert pr.state == 'validated', \
-            "if a PR is reopened and had a CI'd head, it should be validated immediately"
+        assert not pr_id.blocked
+        assert pr_id.message_ids[::-1].mapped(lambda m: (
+            ((m.subject or '') + '\n\n' + m.body).strip(),
+            list(map(read_tracking_value, m.tracking_value_ids)),
+        )) == [
+            ('<p>Pull Request created</p>', []),
+            ('', [('head', c, '0'*40)]),
+            ('', [('head', '0'*40, c), ('squash', 1, 0)]),
+            ('', [('state', 'Opened', 'Approved'), ('reviewed_by', '', 'Reviewer')]),
+            (f'<p>statuses changed on {c}</p>', [('state', 'Approved', 'Ready')]),
+        ]
+        assert pr_id.staging_id
+        with repo:
+            repo.post_status('staging.master', 'success')
+        env.run_crons()
+        assert pr_id.merge_date
 
 class TestBatching(object):
     def _pr(self, repo, prefix, trees, *, target='master', user, reviewer,
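
A minimal sketch of the `squash` bookkeeping `test_update_incorrect_commits_count` pins down, assuming the hook handler trusts the payload's `commits` count as-is; `handle_synchronize` and its `pr` argument are illustrative names, not the mergebot's actual API:

    def handle_synchronize(pr, payload):
        # take the new head from the event...
        pr.head = payload['pull_request']['head']['sha']
        # ...and derive the squash flag from GitHub's own commit count; a
        # stale or out-of-order synchronize event (commits=40123 in the
        # test above) therefore flips squash off even for a 1-commit PR
        pr.squash = payload['pull_request']['commits'] == 1
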
@@ -2982,7 +2999,6 @@ class TestBatching(object):
         pr0.post_comment('hansen NOW!', config['role_reviewer']['token'])
         env.run_crons()
 
-        # TODO: maybe just deactivate stagings instead of deleting them when canceling?
         assert not p_1.staging_id
         assert to_pr(env, pr0).staging_id
 
@@ -3405,15 +3421,15 @@ class TestUnknownPR:
 
         c = env['runbot_merge.commit'].search([('sha', '=', prx.head)])
         assert json.loads(c.statuses) == {
-            'legal/cla': {'state': 'success', 'target_url': None, 'description': None},
-            'ci/runbot': {'state': 'success', 'target_url': 'http://example.org/wheee', 'description': None}
+            'legal/cla': {'state': 'success', 'target_url': None, 'description': None, 'updated_at': matches("$$")},
+            'ci/runbot': {'state': 'success', 'target_url': 'http://example.org/wheee', 'description': None, 'updated_at': matches("$$")}
         }
         assert prx.comments == [
             seen(env, prx, users),
             (users['reviewer'], 'hansen r+'),
             (users['reviewer'], 'hansen r+'),
             seen(env, prx, users),
-            (users['user'], f"@{users['user']} I didn't know about this PR and had to "
+            (users['user'], f"@{users['reviewer']} I didn't know about this PR and had to "
                             "retrieve its information, you may have to "
                             "re-approve it as I didn't see previous commands."),
         ]

@@ -3469,7 +3485,7 @@ class TestUnknownPR:
             # reviewer is set because fetch replays all the comments (thus
             # setting r+ and reviewer) but then syncs the head commit thus
             # unsetting r+ but leaving the reviewer
-            (users['user'], f"@{users['user']} I didn't know about this PR and had to retrieve "
+            (users['user'], f"@{users['reviewer']} I didn't know about this PR and had to retrieve "
                             "its information, you may have to re-approve it "
                             "as I didn't see previous commands."),
         ]
@@ -149,7 +149,6 @@ def test_pseudo_version_tag(env, project, make_repo, setreviewers, config):
     with repo:
         repo.post_status('staging.master', 'success', 'ci')
     env.run_crons() # should merge staging
-    env.run_crons('runbot_merge.labels_cron') # update labels
     assert pr_id.state == 'merged'
     assert pr.labels >= {'2.1'}
 

@@ -170,6 +169,5 @@ def test_pseudo_version_tag(env, project, make_repo, setreviewers, config):
     with repo:
         repo.post_status('staging.master', 'success', 'ci')
     env.run_crons() # should merge staging
-    env.run_crons('runbot_merge.labels_cron') # update labels
     assert pr_id.state == 'merged'
     assert pr.labels >= {'post-bonk'}
@@ -1,8 +1,11 @@
 import pytest
 
-from utils import seen, Commit, pr_page
+from utils import seen, Commit, pr_page, to_pr
 
-def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, config, users, page):
+pytestmark = pytest.mark.defaultstatuses
+
+def test_existing_pr_disabled_branch(env, project, repo, config, users, page):
     """ PRs to disabled branches are ignored, but what if the PR exists *before*
     the branch is disabled?
     """

@@ -10,20 +13,11 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, config, users, page):
     # new work
     assert env['base'].run_crons()
 
-    repo = make_repo('repo')
-    project.branch_ids.sequence = 0
     project.write({'branch_ids': [
+        (1, project.branch_ids.id, {'sequence': 0}),
         (0, 0, {'name': 'other', 'sequence': 1}),
         (0, 0, {'name': 'other2', 'sequence': 2}),
     ]})
-    repo_id = env['runbot_merge.repository'].create({
-        'project_id': project.id,
-        'name': repo.name,
-        'status_ids': [(0, 0, {'context': 'status'})],
-        'group_id': False,
-    })
-    setreviewers(*project.repo_ids)
-    env['runbot_merge.events_sources'].create({'repository': repo.name})
 
     with repo:
         [m] = repo.make_commits(None, Commit('root', tree={'a': '1'}), ref='heads/master')

@@ -32,14 +26,11 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, config, users, page):
 
         [c] = repo.make_commits(ot, Commit('wheee', tree={'b': '2'}))
         pr = repo.make_pr(title="title", body='body', target='other', head=c)
-        repo.post_status(c, 'success', 'status')
+        repo.post_status(c, 'success')
         pr.post_comment('hansen r+', config['role_reviewer']['token'])
     env.run_crons()
 
-    pr_id = env['runbot_merge.pull_requests'].search([
-        ('repository', '=', repo_id.id),
-        ('number', '=', pr.number),
-    ])
+    pr_id = to_pr(env, pr)
     branch_id = pr_id.target
     assert pr_id.staging_id
     staging_id = branch_id.active_staging_id

@@ -47,9 +38,6 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, config, users, page):
 
     # staging of `pr` should have generated a staging branch
     _ = repo.get_ref('heads/staging.other')
-    # stagings should not need a tmp branch anymore, so this should not exist
-    with pytest.raises(AssertionError, match=r'Not Found'):
-        repo.get_ref('heads/tmp.other')
 
     # disable branch "other"
     branch_id.active = False

@@ -74,7 +62,7 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, config, users, page):
     [target] = p.cssselect('table tr.bg-info')
     assert 'inactive' in target.classes
     assert target[0].text_content() == "other"
+    env.run_crons()
     assert pr.comments == [
         (users['reviewer'], "hansen r+"),
         seen(env, pr, users),

@@ -82,37 +70,38 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, config, users, page):
     ]
 
     with repo:
-        [c2] = repo.make_commits(ot, Commit('wheee', tree={'b': '3'}))
-        repo.update_ref(pr.ref, c2, force=True)
+        [c2] = repo.make_commits(ot, Commit('wheee', tree={'b': '3'}), ref=pr.ref, make=False)
+    env.run_crons()
+    assert pr.comments[3] == (
+        users['user'],
+        "This PR targets the disabled branch {repository}:{target}, it needs to be retargeted before it can be merged.".format(
+            repository=repo.name,
+            target="other",
+        )
+    )
     assert pr_id.head == c2, "pr should be aware of its update"
 
     with repo:
         pr.base = 'other2'
-        repo.post_status(c2, 'success', 'status')
+        repo.post_status(c2, 'success')
         pr.post_comment('hansen rebase-ff r+', config['role_reviewer']['token'])
     env.run_crons()
 
+    assert pr.comments[4:] == [
+        (users['reviewer'], 'hansen rebase-ff r+'),
+        (users['user'], "Merge method set to rebase and fast-forward."),
+    ]
+
     assert pr_id.state == 'ready'
     assert pr_id.target == env['runbot_merge.branch'].search([('name', '=', 'other2')])
     assert pr_id.staging_id
 
     # staging of `pr` should have generated a staging branch
     _ = repo.get_ref('heads/staging.other2')
-    # stagings should not need a tmp branch anymore, so this should not exist
-    with pytest.raises(AssertionError, match=r'Not Found'):
-        repo.get_ref('heads/tmp.other2')
 
-def test_new_pr_no_branch(env, project, make_repo, setreviewers, users):
+def test_new_pr_no_branch(env, project, repo, users):
     """ A new PR to an *unknown* branch should be ignored and warn
     """
-    repo = make_repo('repo')
-    repo_id = env['runbot_merge.repository'].create({
-        'project_id': project.id,
-        'name': repo.name,
-        'status_ids': [(0, 0, {'context': 'status'})]
-    })
-    setreviewers(*project.repo_ids)
-    env['runbot_merge.events_sources'].create({'repository': repo.name})
 
     with repo:
         [m] = repo.make_commits(None, Commit('root', tree={'a': '1'}), ref='heads/master')

@@ -123,30 +112,18 @@ def test_new_pr_no_branch(env, project, make_repo, setreviewers, users):
     env.run_crons()
 
     assert not env['runbot_merge.pull_requests'].search([
-        ('repository', '=', repo_id.id),
+        ('repository', '=', project.repo_ids.id),
         ('number', '=', pr.number),
     ]), "the PR should not have been created in the backend"
     assert pr.comments == [
         (users['user'], "This PR targets the un-managed branch %s:other, it needs to be retargeted before it can be merged." % repo.name),
     ]
 
-def test_new_pr_disabled_branch(env, project, make_repo, setreviewers, users):
+def test_new_pr_disabled_branch(env, project, repo, users):
     """ A new PR to a *disabled* branch should be accepted (rather than ignored)
     but should warn
     """
-    repo = make_repo('repo')
-    repo_id = env['runbot_merge.repository'].create({
-        'project_id': project.id,
-        'name': repo.name,
-        'status_ids': [(0, 0, {'context': 'status'})]
-    })
-    env['runbot_merge.branch'].create({
-        'project_id': project.id,
-        'name': 'other',
-        'active': False,
-    })
-    setreviewers(*project.repo_ids)
-    env['runbot_merge.events_sources'].create({'repository': repo.name})
+    project.write({'branch_ids': [(0, 0, {'name': 'other', 'active': False})]})
 
     with repo:
         [m] = repo.make_commits(None, Commit('root', tree={'a': '1'}), ref='heads/master')

@@ -156,13 +133,40 @@ def test_new_pr_disabled_branch(env, project, make_repo, setreviewers, users):
     pr = repo.make_pr(title="title", body='body', target='other', head=c)
     env.run_crons()
 
-    pr_id = env['runbot_merge.pull_requests'].search([
-        ('repository', '=', repo_id.id),
-        ('number', '=', pr.number),
-    ])
+    pr_id = to_pr(env, pr)
     assert pr_id, "the PR should have been created in the backend"
     assert pr_id.state == 'opened'
     assert pr.comments == [
         (users['user'], "This PR targets the disabled branch %s:other, it needs to be retargeted before it can be merged." % repo.name),
         seen(env, pr, users),
     ]
+
+def test_review_disabled_branch(env, project, repo, users, config):
+    with repo:
+        [m] = repo.make_commits(None, Commit("init", tree={'m': 'm'}), ref='heads/master')
+
+        [c] = repo.make_commits(m, Commit('pr', tree={'m': 'n'}))
+        pr = repo.make_pr(target="master", head=c)
+    env.run_crons()
+    target = project.branch_ids
+    target.active = False
+    env.run_crons()
+    with repo:
+        pr.post_comment("A normal comment", config['role_other']['token'])
+    with repo:
+        pr.post_comment("hansen r+", config['role_reviewer']['token'])
+    env.run_crons()
+
+    assert pr.comments == [
+        seen(env, pr, users),
+        (users['user'], "@{user} the target branch {target!r} has been disabled, you may want to close this PR.".format(
+            **users,
+            target=target.name,
+        )),
+        (users['other'], "A normal comment"),
+        (users['reviewer'], "hansen r+"),
+        (users['user'], "This PR targets the disabled branch {repository}:{target}, it needs to be retargeted before it can be merged.".format(
+            repository=repo.name,
+            target=target.name,
+        )),
+    ]
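
For reference, the two retargeting warnings these branch tests assert, gathered into one hedged sketch; the dispatch conditions are inferred from the tests above, and `branch_feedback` is an invented helper rather than mergebot code (the after-the-fact "the target branch ... has been disabled" notification is sent separately, when a branch is deactivated under an existing PR):

    from typing import Optional

    def branch_feedback(repository: str, target: str, *, managed: bool, active: bool) -> Optional[str]:
        if not managed:
            # unknown branch: the PR is not even created in the backend
            return (f"This PR targets the un-managed branch {repository}:{target},"
                    " it needs to be retargeted before it can be merged.")
        if not active:
            # known but disabled branch: the PR is created, with a warning
            return (f"This PR targets the disabled branch {repository}:{target},"
                    " it needs to be retargeted before it can be merged.")
        return None
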
@@ -806,10 +806,12 @@ class TestBlocked:
 
         p = to_pr(env, pr)
         assert p.state == 'ready'
-        assert p.blocked
+        assert not p.blocked
+        assert not p.staging_id
 
         with repo_a: pr.post_comment('hansen rebase-merge', config['role_reviewer']['token'])
-        assert not p.blocked
+        env.run_crons()
+        assert p.staging_id
 
     def test_linked_closed(self, env, repo_a, repo_b, config):
         with repo_a, repo_b:
@@ -261,7 +261,7 @@ def test_force_ready(env, repo, config):
     pr_id.skipchecks = True
 
     assert pr_id.state == 'ready'
-    assert pr_id.status == 'pending'
+    assert pr_id.status == 'success'
     reviewer = env['res.users'].browse([env._uid]).partner_id
     assert pr_id.reviewed_by == reviewer
 

@@ -349,6 +349,7 @@ Currently available commands for @{user}:
 |-|-|
 |`help`|displays this help|
 |`fw=no`|does not forward-port this PR|
+|`fw=default`|forward-ports this PR normally|
 |`up to <branch>`|only ports this PR forward to the specified branch (included)|
 |`check`|fetches or refreshes PR metadata, resets mergebot state|
 
runbot_merge/tests/test_patching.py (new file, 262 lines)
@@ -0,0 +1,262 @@
+import xmlrpc.client
+
+import pytest
+
+from utils import Commit, read_tracking_value, matches
+
+# basic udiff / show style patch, updates `b` from `1` to `2`
+BASIC_UDIFF = """\
+commit 0000000000000000000000000000000000000000
+Author: 3 Discos Down <bar@example.org>
+Date:   2021-04-24T17:09:14Z
+
+    whop
+
+    whop whop
+
+diff --git a/b b/b
+index 000000000000..000000000000 100644
+--- a/b
++++ b/b
+@@ -1,1 +1,1 @@
+-1
++2
+"""
+
+FORMAT_PATCH_XMO = """\
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: 3 Discos Down <bar@example.org>
+Date: Sat, 24 Apr 2021 17:09:14 +0000
+Subject: [PATCH] [I18N] whop
+
+whop whop
+---
+ b | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/b b/b
+index 000000000000..000000000000 100644
+--- a/b
++++ b/b
+@@ -1,1 +1,1 @@
+-1
++2
+--
+2.46.2
+"""
+
+# slightly different format than the one I got, possibly because older?
+FORMAT_PATCH_MAT = """\
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: 3 Discos Down <bar@example.org>
+Date: Sat, 24 Apr 2021 17:09:14 +0000
+Subject: [PATCH 1/1] [I18N] whop
+
+whop whop
+---
+ b | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git b b
+index 000000000000..000000000000 100644
+--- b
++++ b
+@@ -1,1 +1,1 @@
+-1
++2
+--
+2.34.1
+"""
+
+
+@pytest.fixture(autouse=True)
+def _setup(repo):
+    with repo:
+        [c, _] = repo.make_commits(
+            None,
+            Commit("a", tree={"a": "1", "b": "1\n"}),
+            Commit("b", tree={"a": "2"}),
+            ref="heads/master",
+        )
+        repo.make_ref("heads/x", c)
+
+@pytest.mark.parametrize("group,access", [
+    ('base.group_portal', False),
+    ('base.group_user', False),
+    ('runbot_merge.group_patcher', True),
+    ('runbot_merge.group_admin', False),
+    ('base.group_system', True),
+])
+def test_patch_acl(env, project, group, access):
+    g = env.ref(group)
+    assert g._name == 'res.groups'
+    env['res.users'].create({
+        'name': 'xxx',
+        'login': 'xxx',
+        'password': 'xxx',
+        'groups_id': [(6, 0, [g.id])],
+    })
+    env2 = env.with_user('xxx', 'xxx')
+    def create():
+        return env2['runbot_merge.patch'].create({
+            'target': project.branch_ids.id,
+            'repository': project.repo_ids.id,
+            'patch': BASIC_UDIFF,
+        })
+    if access:
+        create()
+    else:
+        pytest.raises(xmlrpc.client.Fault, create)\
+            .match("You are not allowed to create")
+
+def test_apply_commit(env, project, repo, users):
+    with repo:
+        [c] = repo.make_commits("x", Commit("c", tree={"b": "2"}, author={
+            'name': "Henry Hoover",
+            "email": "dustsuckinghose@example.org",
+        }), ref="heads/abranch")
+        repo.delete_ref('heads/abranch')
+
+    p = env['runbot_merge.patch'].create({
+        'target': project.branch_ids.id,
+        'repository': project.repo_ids.id,
+        'commit': c,
+    })
+
+    env.run_crons()
+
+    HEAD = repo.commit('master')
+    assert repo.read_tree(HEAD) == {
+        'a': '2',
+        'b': '2',
+    }
+    assert HEAD.message == "c"
+    assert HEAD.author['name'] == "Henry Hoover"
+    assert HEAD.author['email'] == "dustsuckinghose@example.org"
+    assert not p.active
+
+def test_commit_conflict(env, project, repo, users):
+    with repo:
+        [c] = repo.make_commits("x", Commit("x", tree={"b": "3"}))
+        repo.make_commits("master", Commit("c", tree={"b": "2"}), ref="heads/master", make=False)
+
+    p = env['runbot_merge.patch'].create({
+        'target': project.branch_ids.id,
+        'repository': project.repo_ids.id,
+        'commit': c,
+    })
+
+    env.run_crons()
+
+    HEAD = repo.commit('master')
+    assert repo.read_tree(HEAD) == {
+        'a': '2',
+        'b': '2',
+    }
+    assert not p.active
+    assert [(
+        m.subject,
+        m.body,
+        list(map(read_tracking_value, m.tracking_value_ids)),
+    )
+        for m in reversed(p.message_ids)
+    ] == [
+        (False, '<p>Unstaged direct-application patch created</p>', []),
+        (
+            "Unable to apply patch",
+            """\
+<p>Auto-merging b<br>\
+CONFLICT (content): Merge conflict in b<br></p>\
+""",
+            [],
+        ),
+        (False, '', [('active', 1, 0)]),
+    ]
+
+def test_apply_udiff(env, project, repo, users):
+    p = env['runbot_merge.patch'].create({
+        'target': project.branch_ids.id,
+        'repository': project.repo_ids.id,
+        'patch': BASIC_UDIFF,
+    })
+
+    env.run_crons()
+
+    HEAD = repo.commit('master')
+    assert repo.read_tree(HEAD) == {
+        'a': '2',
+        'b': '2\n',
+    }
+    assert HEAD.message == "whop\n\nwhop whop"
+    assert HEAD.author['name'] == "3 Discos Down"
+    assert HEAD.author['email'] == "bar@example.org"
+    assert not p.active
+
+
+@pytest.mark.parametrize('patch', [
+    pytest.param(FORMAT_PATCH_XMO, id='xmo'),
+    pytest.param(FORMAT_PATCH_MAT, id='mat'),
+])
+def test_apply_format_patch(env, project, repo, users, patch):
+    p = env['runbot_merge.patch'].create({
+        'target': project.branch_ids.id,
+        'repository': project.repo_ids.id,
+        'patch': patch,
+    })
+
+    env.run_crons()
+
+    bot = env['res.users'].browse((1,))
+    assert p.message_ids[::-1].mapped(lambda m: (
+        m.author_id.display_name,
+        m.body,
+        list(map(read_tracking_value, m.tracking_value_ids)),
+    )) == [
+        (p.create_uid.partner_id.display_name, '<p>Unstaged direct-application patch created</p>', []),
+        (bot.partner_id.display_name, "", [('active', 1, 0)]),
+    ]
+    HEAD = repo.commit('master')
+    assert repo.read_tree(HEAD) == {
+        'a': '2',
+        'b': '2\n',
+    }
+    assert HEAD.message == "[I18N] whop\n\nwhop whop"
+    assert HEAD.author['name'] == "3 Discos Down"
+    assert HEAD.author['email'] == "bar@example.org"
+    assert not p.active
+
+def test_patch_conflict(env, project, repo, users):
+    p = env['runbot_merge.patch'].create({
+        'target': project.branch_ids.id,
+        'repository': project.repo_ids.id,
+        'patch': BASIC_UDIFF,
+    })
+    with repo:
+        repo.make_commits('master', Commit('cccombo breaker', tree={'b': '3'}), ref='heads/master', make=False)
+
+    env.run_crons()
+
+    HEAD = repo.commit('master')
+    assert HEAD.message == 'cccombo breaker'
+    assert repo.read_tree(HEAD) == {
+        'a': '2',
+        'b': '3',
+    }
+    assert not p.active
+    assert [(
+        m.subject,
+        m.body,
+        list(map(read_tracking_value, m.tracking_value_ids)),
+    )
+        for m in reversed(p.message_ids)
+    ] == [(
+        False,
+        '<p>Unstaged direct-application patch created</p>',
+        [],
+    ), (
+        "Unable to apply patch",
+        matches("$$"),  # feedback from patch can vary
+        [],
+    ), (
+        False, '', [('active', 1, 0)]
+    )]
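
The fixtures above cover three on-disk patch shapes. A rough way to tell them apart, under the assumption that the header line is what distinguishes them; the actual `runbot_merge.patch` parser may well dispatch differently, and `patch_kind` is an invented helper:

    import re

    def patch_kind(patch: str) -> str:
        if re.match(r"From [0-9a-f]{40} ", patch):
            return "format-patch"  # email-style, the kind `git am` consumes
        if patch.startswith("commit "):
            return "show"          # `git show`/log output with a udiff body
        return "udiff"             # bare diff, the kind `git apply` consumes

    # e.g. patch_kind(BASIC_UDIFF) == "show",
    #      patch_kind(FORMAT_PATCH_XMO) == patch_kind(FORMAT_PATCH_MAT) == "format-patch"
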
@@ -1,4 +1,4 @@
-from utils import Commit, to_pr
+from utils import Commit, to_pr, make_basic, prevent_unstaging
 
 
 def test_staging_disabled_branch(env, project, repo, config):

@@ -26,3 +26,95 @@ def test_staging_disabled_branch(env, project, repo, config):
         "master is allowed to stage, should be staged"
     assert not to_pr(env, other_pr).staging_id, \
         "other is *not* allowed to stage, should not be staged"
+
+
+def test_staged_failure(env, config, repo, users):
+    """If a PR is staged and gets a new CI failure, it should be unstaged
+
+    This was an issue with odoo/odoo#165931 which got rebuilt and that triggered
+    a failure, which made the PR !ready but kept the staging going. So the PR
+    ended up in an odd state of being both staged and not ready.
+
+    And while the CI failure it got was a false positive, it was in fact the
+    problematic PR breaking that staging.
+
+    More relevant the runbot's "automatic rebase" mode sends CI to the original
+    commits so receiving legitimate failures after staging very much makes
+    sense e.g. an old PR is staged, the staging starts failing, somebody notices
+    the outdated PR and triggers autorebase, which fails (because something
+    incompatible was merged in the meantime), the PR *should* be unstaged.
+    """
+    with repo:
+        repo.make_commits(None, Commit("master", tree={'a': '1'}), ref="heads/master")
+
+        repo.make_commits('master', Commit('c', tree={'a': 'b'}), ref="heads/mybranch")
+        pr = repo.make_pr(target='master', head='mybranch')
+        repo.post_status('mybranch', 'success')
+        pr.post_comment('hansen r+', config['role_reviewer']['token'])
+    env.run_crons()
+
+    pr_id = to_pr(env, pr)
+    staging = pr_id.staging_id
+    assert staging, "pr should be staged"
+
+    with repo:
+        # started rebuild, nothing should happen
+        repo.post_status('mybranch', 'pending')
+    env.run_crons()
+    assert pr_id.staging_id
+    # can't find a clean way to keep this "ready" when transitioning from
+    # success to pending *without updating the head*, at least not without
+    # adding a lot of contextual information around `_compute_statuses`
+    # assert pr_id.state == 'ready'
+
+    with repo:
+        # rebuild failed omg!
+        repo.post_status('mybranch', 'failure')
+    env.run_crons()
+
+    assert pr_id.status == 'failure'
+    assert pr_id.state == 'approved'
+
+    assert not pr_id.staging_id, "pr should be unstaged"
+    assert staging.state == "cancelled"
+    assert staging.reason == f"{pr_id.display_name} had CI failure after staging"
+
+
+def test_update_unready(env, config, repo, users):
+    """Less likely with `test_staged_failure` fixing most of the issue, but
+    clearly the assumption that a staged PR will be `ready` is not strictly
+    enforced.
+
+    As such, updating the PR should try to `unstage` it no matter what state
+    it's in, this will lead to slightly higher loads on sync but loads on the
+    mergebot are generally non-existent outside of the git maintenance cron,
+    and there are doubtless other optimisations missing, or that (and other
+    items) can be done asynchronously.
+    """
+    with repo:
+        repo.make_commits(None, Commit("master", tree={'a': '1'}), ref="heads/master")
+
+        repo.make_commits('master', Commit('c', tree={'a': 'b'}), ref="heads/mybranch")
+        pr = repo.make_pr(target='master', head='mybranch')
+        repo.post_status('mybranch', 'success')
+        pr.post_comment('hansen r+', config['role_reviewer']['token'])
+
+        [c2] = repo.make_commits('master', Commit('c', tree={'a': 'c'}))
+    env.run_crons()
+
+    pr_id = to_pr(env, pr)
+    staging = pr_id.staging_id
+    assert staging, "pr should be staged"
+
+    with prevent_unstaging(pr_id.staging_id):
+        pr_id.overrides = '{"default": {"state": "failure"}}'
+    assert pr_id.state == "approved"
+    assert pr_id.staging_id, "pr should not have been unstaged because we cheated"
+
+    with repo:
+        repo.update_ref("heads/mybranch", c2, force=True)
+    env.run_crons()
+
+    assert not pr_id.staging_id, "pr should be unstaged"
+    assert staging.state == "cancelled"
+    assert staging.reason == f"{pr_id.display_name} updated by {users['user']}"
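
A toy model of the unstage-on-failure rule `test_staged_failure` describes, assuming only that a hard failure cancels the staging while a rebuild going back to pending does not; every name here is invented for illustration, none of it is the mergebot's actual code:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Staging:
        state: str = "pending"
        reason: str = ""

    @dataclass
    class PullRequest:
        display_name: str
        state: str = "ready"
        staging: Optional[Staging] = None

        def on_status(self, state: str) -> None:
            # a rebuild flipping the head back to 'pending' leaves the
            # staging alone; only an actual failure unstages the PR
            if state == "failure" and self.staging:
                self.staging.state = "cancelled"
                self.staging.reason = f"{self.display_name} had CI failure after staging"
                self.staging = None
                self.state = "approved"

    pr = PullRequest("repo#1", staging=Staging())
    pr.on_status("pending")
    assert pr.staging is not None
    pr.on_status("failure")
    assert pr.state == "approved" and pr.staging is None
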
@@ -115,6 +115,7 @@
                 <field name="state"/>
                 <field name="author"/>
                 <field name="reviewed_by"/>
+                <field name="write_date"/>
             </tree>
         </field>
     </record>

@@ -379,6 +380,15 @@
         <field name="res_model">runbot_merge.commit</field>
         <field name="view_mode">tree,form</field>
     </record>
+    <record id="runbot_merge_commits_search" model="ir.ui.view">
+        <field name="name">commits search</field>
+        <field name="model">runbot_merge.commit</field>
+        <field name="arch" type="xml">
+            <search>
+                <field name="sha" operator="="/>
+            </search>
+        </field>
+    </record>
     <record id="runbot_merge_commits_tree" model="ir.ui.view">
         <field name="name">commits list</field>
         <field name="model">runbot_merge.commit</field>

@@ -23,7 +23,7 @@
         <field name="res_model">runbot_merge.pull_requests.feedback</field>
     </record>
     <record id="tree_feedback" model="ir.ui.view">
-        <field name="name">Feedback</field>
+        <field name="name">Feedback List</field>
         <field name="model">runbot_merge.pull_requests.feedback</field>
         <field name="arch" type="xml">
             <tree>

@@ -81,17 +81,86 @@
         </field>
     </record>
 
-    <menuitem name="Queues" id="menu_queues" parent="runbot_merge_menu"/>
+    <record id="action_patches" model="ir.actions.act_window">
+        <field name="name">Patches</field>
+        <field name="res_model">runbot_merge.patch</field>
+    </record>
+    <record id="search_patch" model="ir.ui.view">
+        <field name="name">Patches Search</field>
+        <field name="model">runbot_merge.patch</field>
+        <field name="arch" type="xml">
+            <search>
+                <filter string="Inactive" name="active" domain="[('active', '=', False)]"/>
+                <field name="target"/>
+                <field name="repository"/>
+            </search>
+        </field>
+    </record>
+    <record id="tree_patch" model="ir.ui.view">
+        <field name="name">Patches List</field>
+        <field name="model">runbot_merge.patch</field>
+        <field name="arch" type="xml">
+            <tree>
+                <field name="id"/>
+                <field name="repository"/>
+                <field name="target"/>
+            </tree>
+        </field>
+    </record>
+    <record id="form_patch" model="ir.ui.view">
+        <field name="name">Patches Form</field>
+        <field name="model">runbot_merge.patch</field>
+        <field name="arch" type="xml">
+            <form>
+                <sheet>
+                    <group>
+                        <group>
+                            <field name="repository"/>
+                            <field name="target"/>
+                        </group>
+                        <group>
+                            <field name="active"/>
+                        </group>
+                    </group>
+                    <group attrs="{'invisible': [
+                        ('commit', '=', False),
+                        ('patch', '!=', False),
+                    ]}">
+                        <group colspan="4">
+                            <field name="commit"/>
+                        </group>
+                    </group>
+                    <group attrs="{'invisible': [
+                        ('patch', '=', False),
+                        ('commit', '!=', False),
+                    ]}">
+                        <group colspan="4">
+                            <field name="format" colspan="4"/>
+                            <field name="patch" widget="ace"/>
+                            <!-- no diff/patch mode support -->
+                            <!-- options="{'mode': 'patch'}"/> -->
+                            <field name="message" colspan="4"/>
+                        </group>
+                    </group>
+                </sheet>
+                <div class="oe_chatter">
+                    <field name="message_follower_ids" widget="mail_followers"/>
+                    <field name="message_ids" widget="mail_thread"/>
+                </div>
+            </form>
+        </field>
+    </record>
+
+    <menuitem name="Queues" id="menu_queues" parent="runbot_merge_menu">
     <menuitem name="Splits" id="menu_queues_splits"
-              parent="menu_queues"
               action="action_splits"/>
     <menuitem name="Feedback" id="menu_queues_feedback"
-              parent="menu_queues"
               action="action_feedback"/>
     <menuitem name="Tagging" id="menu_queues_tagging"
-              parent="menu_queues"
               action="action_tagging"/>
     <menuitem name="Fetches" id="menu_fetches"
-              parent="menu_queues"
               action="action_fetches"/>
+    <menuitem name="Patches" id="menu_patches"
+              action="action_patches"/>
+    </menuitem>
 </odoo>
@@ -5,21 +5,18 @@
     </function>
 
     <template id="link-pr" name="create a link to `pr`">
+        <t t-set="title"
+           t-value="pr.message.split('\n', 1)[0] if pr.repository.group_id <= env.user.groups_id else ''"/>
         <t t-set="title">
-            <t t-if="pr.repository.group_id <= env.user.groups_id">
-                <t t-out="pr.message.split('\n', 1)[0]"/>
-            </t>
-        </t>
-        <t t-set="title">
-            <t t-if="title.strip() and pr.blocked" >
-                <t t-out="title.strip()"/>: <t t-out="pr.blocked"/>
+            <t t-if="title and pr.blocked" >
+                <t t-out="title"/>: <t t-out="pr.blocked"/>
             </t>
             <t t-else="">
-                <t t-out="pr.blocked or title.strip()"/>
+                <t t-out="pr.blocked or title"/>
             </t>
         </t>
         <a t-attf-href="https://github.com/{{ pr.repository.name }}/pull/{{ pr.number }}"
-           t-att-title="title"
+           t-att-title="title.strip()"
            t-att-target="target or None"
            t-att-class="classes or None"
         ><t t-esc="pr.display_name"/></a>

@@ -177,14 +174,15 @@
             <t t-if="staging_index >= 4">d-none d-lg-block</t>
         </t>
         <t t-set="title">
-            <t t-if="staging.state == 'ff_failed'">fast forward failed (<t t-esc="staging.reason"/>)</t>
-            <t t-if="staging.state == 'pending'">last status</t>
+            <t t-if="staging.state == 'success'"/>
+            <t t-else="">
+                <t t-if="staging.state == 'pending'">last status</t
+                ><t t-elif="staging.state == 'ff_failed'">fast forward failed (<t t-out="(staging.reason or '').replace('\'', '')"/>)</t
+                ><t t-else="" t-out="(staging.reason or '').replace('\'', '')"
+                /> at <t t-out="(staging.staging_end or staging.write_date).replace(microsecond=0)"/>Z
+            </t>
         </t>
-        <!-- separate concatenation to avoid having line-break in title as some browsers trigger it -->
-        <!-- write-date may have microsecond precision, strip that information -->
-        <!-- use write-date under assumption that a staging is last modified when it ends -->
-        <t t-set="title"><t t-esc="title.strip() or staging.reason"/> at <t t-esc="staging.write_date.replace(microsecond=0)"/>Z</t>
-        <li t-attf-class="staging {{stateclass.strip()}} {{decorationclass.strip()}}" t-att-title="title">
+        <li t-attf-class="staging {{stateclass.strip()}} {{decorationclass.strip()}}" t-att-title="title.strip() or None">
             <ul class="list-unstyled">
                 <li t-foreach="staging.batch_ids" t-as="batch" class="batch">
                     <t t-esc="batch.prs[:1].label"/>

@@ -241,14 +239,14 @@
                 </t>
             </t>
             <t t-set="title">
-                <t t-if="staging.state == 'canceled'">Cancelled:
-                    <t t-esc="staging.reason"/>
+                <t t-if="staging.state == 'ff_failed'">
+                    Fast Forward Failed
                 </t>
-                <t t-if="staging.state == 'ff_failed'">Fast
-                    Forward Failed
+                <t t-elif="staging.state == 'canceled'">
+                    Cancelled<t t-if="staging.reason">: <t t-out="staging.reason.replace('\'', '')"/></t>
                 </t>
-                <t t-if="staging.state not in ('canceled', 'ff_failed')">
-                    <t t-esc="staging.reason"/>
+                <t t-else="">
+                    <t t-out="(staging.reason or '').replace('\'', '')"/>
                 </t>
             </t>
             <tr t-att-class="stateclass"

@@ -467,6 +465,8 @@ if not genealogy:
         repos = pr.repository
     elif all(p.state in ('merged', 'closed') for p in genealogy[-1].all_prs):
         branches = (project.branch_ids & targets)[::-1]
+    elif pr.batch_id.fw_policy == 'no':
+        branches = pr.target
     else:
         # if the tip of the genealogy is not closed, extend to the furthest limit,
         # keeping branches which are active or have an associated batch / PR