[MERGE] bot from 16.0 to 17.0

Broken (can't run odoo at all):

- In Odoo 17.0, the `pre_init_hook` takes an env, not a cursor: update
  `_check_citext` accordingly (see the sketch after this list).
- Odoo 17.0 rejects `@attrs` and doesn't say where they are or how to
  update them, fun: hunt down every `attrs={'invisible': ...` and try
  to fix them (conversion shape shown after this list).
- Odoo 17.0 warns on non-multi creates: update them. Most were very
  reasonable, one very much wasn't.
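
A minimal sketch of the hook change, assuming a hypothetical
`_check_citext` body (the real check differs):

    from odoo.api import Environment

    def _check_citext(env: Environment) -> None:
        # 17.0: the hook receives an env, not a bare cursor as in 16.0
        cr = env.cr
        cr.execute("SELECT 1 FROM pg_extension WHERE extname = 'citext'")
        if not cr.rowcount:
            raise AssertionError("the citext extension is required")

For the `@attrs` hunt, the conversion is mechanical once found: e.g.
`attrs="{'invisible': [('state', '!=', 'merged')]}"` (as on the
`merge_date` field further down this diff) becomes the direct
expression attribute `invisible="state != 'merged'"`.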

Test failures:

- Odoo 17.0 deprecates `name_get` and doesn't use it as a *source*
  anymore: replace `name_get` overrides with overrides of
  `_compute_display_name`.
- Multiple tracking changes (sketched after this list):
  - `_track_set_author` takes a `Partner`, not an id.
  - `_message_compute_author` still requires overriding in order to
    handle record creation, as the standard implementation doesn't
    support author overriding there.
  - `mail.tracking.value.field_type` has been removed, the field type
    now needs to be retrieved from the `field_id`.
  - Some tracking orderings have changed and require adjusting a few
    tests.
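
Illustrative sketches for the first and third tracking items (the
surrounding variables are assumptions, not code from this commit):

    # 17.0: _track_set_author takes a res.partner record
    author = env['res.partner'].browse(author_id)
    record._track_set_author(author)  # 16.0 took the bare id

    # 17.0: the tracked field's type comes from the related
    # ir.model.fields record, `field_type` is gone
    tracking = env['mail.tracking.value'].browse(tracking_id)
    ftype = tracking.field_id.ttype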

Also added a few flushes before SQL queries which are not (obviously,
at least) at the start of a cron or controller. No test failure was
observed, but better safe than sorry (probably); the pattern is shown
below.
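
The flush pattern in question (model and query are illustrative):

    # flush pending ORM writes so the raw SQL below sees them
    self.env.flush_all()
    self.env.cr.execute(
        "SELECT id FROM runbot_merge_pull_requests WHERE state = %s",
        ['ready'],
    )
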
commit aa1df22657 by Xavier Morel, 2024-08-12 13:13:03 +02:00
88 changed files with 11638 additions and 4276 deletions


@ -1,4 +1,12 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
import datetime
import errno
import select
import shutil
import threading
from typing import Optional
"""
Configuration:
@ -46,6 +54,7 @@ import collections
import configparser
import contextlib
import copy
import fcntl
import functools
import http.client
import itertools
@ -64,7 +73,6 @@ import warnings
import xmlrpc.client
from contextlib import closing
import psutil
import pytest
import requests
@ -79,7 +87,7 @@ def pytest_addoption(parser):
parser.addoption('--coverage', action='store_true')
parser.addoption(
'--tunnel', action="store", type="choice", choices=['', 'ngrok', 'localtunnel'], default='',
'--tunnel', action="store", choices=['', 'ngrok', 'localtunnel'], default='',
help="Which tunneling method to use to expose the local Odoo server "
"to hook up github's webhook. ngrok is more reliable, but "
"creating a free account is necessary to avoid rate-limiting "
@ -88,11 +96,27 @@ def pytest_addoption(parser):
"blow through the former); localtunnel has no rate-limiting but "
"the servers are way less reliable")
def is_manager(config):
return not hasattr(config, 'workerinput')
# noinspection PyUnusedLocal
def pytest_configure(config):
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'mergebot_test_utils'))
config.addinivalue_line(
"markers",
"expect_log_errors(reason): allow and require tracebacks in the log",
)
config.addinivalue_line(
"markers",
"defaultstatuses: use the statuses `default` rather than `ci/runbot,legal/cla`",
)
def pytest_unconfigure(config):
if not is_manager(config):
return
for c in config._tmp_path_factory.getbasetemp().iterdir():
if c.is_file() and c.name.startswith('template-'):
subprocess.run(['dropdb', '--if-exists', c.read_text(encoding='utf-8')])
@pytest.fixture(scope='session', autouse=True)
def _set_socket_timeout():
@ -143,6 +167,14 @@ def rolemap(request, config):
@pytest.fixture
def partners(env, config, rolemap):
"""This specifically does not create partners for ``user`` and ``other``
so they can be generated on-interaction, as "external" users.
The two differ in that ``user`` has ownership of the org and can manage
repos there, ``other`` is completely unrelated to anything so useful to
check for interaction where the author only has read access to the reference
repositories.
"""
m = {}
for role, u in rolemap.items():
if role in ('user', 'other'):
@ -187,6 +219,7 @@ def tunnel(pytestconfig, port):
if tunnel == '':
yield f'http://localhost:{port}'
elif tunnel == 'ngrok':
own = None
web_addr = 'http://localhost:4040/api'
addr = 'localhost:%d' % port
# try to find out if ngrok is running, and if it's not attempt
@ -195,13 +228,9 @@ def tunnel(pytestconfig, port):
# FIXME: this is for xdist to avoid workers running ngrok at the
# exact same time, use lockfile instead
time.sleep(random.SystemRandom().randint(1, 10))
# FIXME: use config file so we can set web_addr to something else
# than localhost:4040 (otherwise we can't disambiguate
# between the ngrok we started and an ngrok started by
# some other user)
requests.get(web_addr)
except requests.exceptions.ConnectionError:
subprocess.Popen(NGROK_CLI, stdout=subprocess.DEVNULL)
own = subprocess.Popen(NGROK_CLI, stdout=subprocess.DEVNULL)
for _ in range(5):
time.sleep(1)
with contextlib.suppress(requests.exceptions.ConnectionError):
@ -213,8 +242,8 @@ def tunnel(pytestconfig, port):
requests.post(f'{web_addr}/tunnels', json={
'name': str(port),
'proto': 'http',
'bind_tls': True, # only https
'addr': addr,
'schemes': ['https'],
'inspect': True,
}).raise_for_status()
@ -242,17 +271,14 @@ def tunnel(pytestconfig, port):
raise TimeoutError("ngrok tunnel deletion failed")
r = requests.get(f'{web_addr}/tunnels')
assert r.ok, f'{r.reason} {r.text}'
# there are still tunnels in the list -> bail
if r.ok and r.json()['tunnels']:
if not own or r.json()['tunnels']:
return
# ngrok is broken or all tunnels have been shut down -> try to
# find and kill it (but only if it looks a lot like we started it)
for p in psutil.process_iter():
if p.name() == 'ngrok' and p.cmdline() == NGROK_CLI:
p.terminate()
break
return
# no more tunnels and we started ngrok -> try to kill it
own.terminate()
own.wait(30)
else:
raise TimeoutError("ngrok tunnel creation failed (?)")
elif tunnel == 'localtunnel':
@ -269,39 +295,73 @@ def tunnel(pytestconfig, port):
raise ValueError("Unsupported %s tunnel method" % tunnel)
class DbDict(dict):
def __init__(self, adpath):
def __init__(self, adpath, shared_dir):
super().__init__()
self._adpath = adpath
self._shared_dir = shared_dir
def __missing__(self, module):
self[module] = db = 'template_%s' % uuid.uuid4()
with tempfile.TemporaryDirectory() as d:
with contextlib.ExitStack() as atexit:
f = atexit.enter_context(os.fdopen(os.open(
self._shared_dir / f'template-{module}',
os.O_CREAT | os.O_RDWR
), mode="r+", encoding='utf-8'))
fcntl.lockf(f, fcntl.LOCK_EX)
atexit.callback(fcntl.lockf, f, fcntl.LOCK_UN)
db = f.read()
if db:
self[module] = db
return db
d = (self._shared_dir / f'shared-{module}')
d.mkdir()
self[module] = db = 'template_%s' % uuid.uuid4()
subprocess.run([
'odoo', '--no-http',
'--addons-path', self._adpath,
'-d', db, '-i', module + ',auth_oauth',
*(['--addons-path', self._adpath] if self._adpath else []),
'-d', db, '-i', module + ',saas_worker,auth_oauth',
'--max-cron-threads', '0',
'--stop-after-init',
'--log-level', 'warn'
'--log-level', 'warn',
'--log-handler', 'py.warnings:ERROR',
],
check=True,
env={**os.environ, 'XDG_DATA_HOME': d}
env={**os.environ, 'XDG_DATA_HOME': str(d)}
)
f.write(db)
f.flush()
os.fsync(f.fileno())
subprocess.run(['psql', db, '-c', "UPDATE ir_cron SET nextcall = 'infinity'"])
return db
@pytest.fixture(scope='session')
def dbcache(request):
def dbcache(request, tmp_path_factory, addons_path):
""" Creates template DB once per run, then just duplicates it before
starting odoo and running the testcase
"""
dbs = DbDict(request.config.getoption('--addons-path'))
shared_dir = tmp_path_factory.getbasetemp()
if not is_manager(request.config):
# xdist workers get a subdir as their basetemp, so we need to go one
# level up to deref it
shared_dir = shared_dir.parent
dbs = DbDict(addons_path, shared_dir)
yield dbs
for db in dbs.values():
subprocess.run(['dropdb', db], check=True)
@pytest.fixture
def db(request, module, dbcache):
def db(request, module, dbcache, tmpdir):
template_db = dbcache[module]
rundb = str(uuid.uuid4())
subprocess.run(['createdb', '-T', dbcache[module], rundb], check=True)
subprocess.run(['createdb', '-T', template_db, rundb], check=True)
share = tmpdir.mkdir('share')
shutil.copytree(
str(dbcache._shared_dir / f'shared-{module}'),
str(share),
dirs_exist_ok=True,
)
(share / 'Odoo' / 'filestore' / template_db).rename(
share / 'Odoo' / 'filestore' / rundb)
yield rundb
@ -323,12 +383,14 @@ def wait_for_server(db, port, proc, mod, timeout=120):
try:
uid = xmlrpc.client.ServerProxy(
'http://localhost:{}/xmlrpc/2/common'.format(port))\
.authenticate(db, 'admin', 'admin', {})
f'http://localhost:{port}/xmlrpc/2/common'
).authenticate(db, 'admin', 'admin', {
'base_location': f"http://localhost:{port}",
})
mods = xmlrpc.client.ServerProxy(
'http://localhost:{}/xmlrpc/2/object'.format(port))\
.execute_kw(
db, uid, 'admin', 'ir.module.module', 'search_read', [
f'http://localhost:{port}/xmlrpc/2/object'
).execute_kw(
db, uid, 'admin', 'ir.module.module', 'search_read', [
[('name', '=', mod)], ['state']
])
if mods and mods[0].get('state') == 'installed':
@ -344,39 +406,128 @@ def port():
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
@pytest.fixture
def page(port):
with requests.Session() as s:
def get(url):
r = s.get('http://localhost:{}{}'.format(port, url))
r.raise_for_status()
return r.content
yield get
@pytest.fixture(scope='session')
def dummy_addons_path():
with tempfile.TemporaryDirectory() as dummy_addons_path:
mod = pathlib.Path(dummy_addons_path, 'saas_worker')
mod.mkdir(0o700)
(mod / '__init__.py').write_bytes(b'')
(mod / '__init__.py').write_text('''\
import builtins
import logging
import threading
import psycopg2
import odoo
from odoo import api, fields, models
_logger = logging.getLogger(__name__)
class Base(models.AbstractModel):
_inherit = 'base'
def run_crons(self):
builtins.forwardport_merged_before = self.env.context.get('forwardport_merged_before')
builtins.forwardport_updated_before = self.env.context.get('forwardport_updated_before')
self.env['ir.cron']._process_jobs(self.env.cr.dbname)
del builtins.forwardport_updated_before
del builtins.forwardport_merged_before
return True
class IrCron(models.Model):
_inherit = 'ir.cron'
@classmethod
def _process_jobs(cls, db_name):
t = threading.current_thread()
try:
db = odoo.sql_db.db_connect(db_name)
t.dbname = db_name
with db.cursor() as cron_cr:
# FIXME: override `_get_all_ready_jobs` to directly lock the cron?
while jobs := next((
job
for j in cls._get_all_ready_jobs(cron_cr)
if (job := cls._acquire_one_job(cron_cr, (j['id'],)))
), None):
# take into account overrides of _process_job() on that database
registry = odoo.registry(db_name)
registry[cls._name]._process_job(db, cron_cr, job)
cron_cr.commit()
except psycopg2.ProgrammingError as e:
raise
except Exception:
_logger.warning('Exception in cron:', exc_info=True)
finally:
if hasattr(t, 'dbname'):
del t.dbname
''', encoding='utf-8')
(mod / '__manifest__.py').write_text(pprint.pformat({
'name': 'dummy saas_worker',
'version': '1.0',
}), encoding='utf-8')
(mod / 'util.py').write_text("""\
def from_role(_):
def from_role(*_, **__):
return lambda fn: fn
""", encoding='utf-8')
yield dummy_addons_path
@pytest.fixture(scope='session')
def addons_path(request, dummy_addons_path):
return ','.join(map(str, filter(None, [
request.config.getoption('--addons-path'),
dummy_addons_path,
])))
@pytest.fixture
def server(request, db, port, module, dummy_addons_path, tmpdir):
def server(request, db, port, module, addons_path, tmpdir):
log_handlers = [
'odoo.modules.loading:WARNING',
'py.warnings:ERROR',
]
if not request.config.getoption('--log-github'):
log_handlers.append('github_requests:WARNING')
addons_path = ','.join(map(str, [
request.config.getoption('--addons-path'),
dummy_addons_path,
]))
cov = []
if request.config.getoption('--coverage'):
cov = ['coverage', 'run', '-p', '--source=odoo.addons.runbot_merge,odoo.addons.forwardport', '--branch']
cov = [
'coverage', 'run',
'-p', '--branch',
'--source=odoo.addons.runbot_merge,odoo.addons.forwardport',
'--context', request.node.nodeid,
'-m',
]
r, w = os.pipe2(os.O_NONBLOCK)
buf = bytearray()
def _move(inpt=r, output=sys.stdout.fileno()):
while p.poll() is None:
readable, _, _ = select.select([inpt], [], [], 1)
if readable:
r = os.read(inpt, 4096)
if not r:
break
try:
os.write(output, r)
except OSError as e:
if e.errno == errno.EBADF:
break
raise
buf.extend(r)
os.close(inpt)
p = subprocess.Popen([
*cov,
@ -385,25 +536,46 @@ def server(request, db, port, module, dummy_addons_path, tmpdir):
'-d', db,
'--max-cron-threads', '0', # disable cron threads (we're running crons by hand)
*itertools.chain.from_iterable(('--log-handler', h) for h in log_handlers),
], env={
], stderr=w, env={
**os.environ,
# stop putting garbage in the user dirs, and potentially creating conflicts
# TODO: way to override this with macOS?
'XDG_DATA_HOME': str(tmpdir.mkdir('share')),
'XDG_DATA_HOME': str(tmpdir / 'share'),
'XDG_CACHE_HOME': str(tmpdir.mkdir('cache')),
})
os.close(w)
# start the reader thread here so `_move` can read `p` without needing
# additional handholding
threading.Thread(target=_move, daemon=True).start()
try:
wait_for_server(db, port, p, module)
yield p
yield p, buf
finally:
p.terminate()
p.wait(timeout=30)
@pytest.fixture
def env(port, server, db, default_crons):
yield Environment(port, db, default_crons)
def env(request, port, server, db):
yield Environment(port, db)
if request.node.get_closest_marker('expect_log_errors'):
if b"Traceback (most recent call last):" not in server[1]:
pytest.fail("should have found error in logs.")
else:
if b"Traceback (most recent call last):" in server[1]:
pytest.fail("unexpected error in logs, fix, or mark function as `expect_log_errors` to require.")
@pytest.fixture
def reviewer_admin(env, partners):
env['res.users'].create({
'partner_id': partners['reviewer'].id,
'login': 'reviewer',
'groups_id': [
(4, env.ref("base.group_user").id, 0),
(4, env.ref("runbot_merge.group_admin").id, 0),
],
})
def check(response):
assert response.ok, response.text or response.reason
@ -412,6 +584,10 @@ def check(response):
# to) break the existing local tests
@pytest.fixture
def make_repo(capsys, request, config, tunnel, users):
"""Fixtures which creates a repository on the github side, plugs webhooks
in, and registers the repository for deletion on cleanup (unless
``--no-delete`` is set)
"""
owner = config['github']['owner']
github = requests.Session()
github.headers['Authorization'] = 'token %s' % config['github']['token']
@ -489,7 +665,6 @@ def _rate_limited(req):
if not q.ok and q.headers.get('X-RateLimit-Remaining') == '0':
reset = int(q.headers['X-RateLimit-Reset'])
delay = max(0, round(reset - time.time() + 1.0))
print("Hit rate limit, sleeping for", delay, "seconds")
time.sleep(delay)
continue
break
@ -505,6 +680,9 @@ class Repo:
self.hook = False
repos.append(self)
def __repr__(self):
return f'<conftest.Repo {self.name}>'
@property
def owner(self):
return self.name.split('/')[0]
@ -542,14 +720,13 @@ class Repo:
assert self.hook
r = self._session.get(
'https://api.github.com/repos/{}/hooks'.format(self.name))
response = r.json()
assert 200 <= r.status_code < 300, response
[hook] = response
assert 200 <= r.status_code < 300, r.text
[hook] = r.json()
r = self._session.patch('https://api.github.com/repos/{}/hooks/{}'.format(self.name, hook['id']), json={
'config': {**hook['config'], 'secret': secret},
})
assert 200 <= r.status_code < 300, r.json()
assert 200 <= r.status_code < 300, r.text
def get_ref(self, ref):
# differs from .commit(ref).id for the sake of assertion error messages
@ -574,7 +751,7 @@ class Repo:
assert res['object']['type'] == 'commit'
return res['object']['sha']
def commit(self, ref):
def commit(self, ref: str) -> Commit:
if not re.match(r'[0-9a-f]{40}', ref):
if not ref.startswith(('heads/', 'refs/heads/')):
ref = 'refs/heads/' + ref
@ -585,12 +762,11 @@ class Repo:
ref = 'refs/' + ref
r = self._session.get('https://api.github.com/repos/{}/commits/{}'.format(self.name, ref))
response = r.json()
assert 200 <= r.status_code < 300, response
assert 200 <= r.status_code < 300, r.text
return self._commit_from_gh(response)
return self._commit_from_gh(r.json())
def _commit_from_gh(self, gh_commit):
def _commit_from_gh(self, gh_commit: dict) -> Commit:
c = gh_commit['commit']
return Commit(
id=gh_commit['sha'],
@ -608,14 +784,14 @@ class Repo:
:rtype: Dict[str, str]
"""
r = self._session.get('https://api.github.com/repos/{}/git/trees/{}'.format(self.name, commit.tree))
assert 200 <= r.status_code < 300, r.json()
assert 200 <= r.status_code < 300, r.text
# read tree's blobs
tree = {}
for t in r.json()['tree']:
assert t['type'] == 'blob', "we're *not* doing recursive trees in test cases"
r = self._session.get('https://api.github.com/repos/{}/git/blobs/{}'.format(self.name, t['sha']))
assert 200 <= r.status_code < 300, r.json()
assert 200 <= r.status_code < 300, r.text
tree[t['path']] = base64.b64decode(r.json()['content']).decode()
return tree
@ -645,7 +821,7 @@ class Repo:
'required_pull_request_reviews': None,
'restrictions': None,
})
assert 200 <= r.status_code < 300, r.json()
assert 200 <= r.status_code < 300, r.text
# FIXME: remove this (runbot_merge should use make_commits directly)
def make_commit(self, ref, message, author, committer=None, tree=None, wait=True):
@ -748,7 +924,16 @@ class Repo:
)).raise_for_status()
return PR(self, number)
def make_pr(self, *, title=None, body=None, target, head, draft=False, token=None):
def make_pr(
self,
*,
title: Optional[str] = None,
body: Optional[str] = None,
target: str,
head: str,
draft: bool = False,
token: Optional[str] = None
) -> PR:
assert self.hook
self.hook = 2
@ -781,10 +966,9 @@ class Repo:
},
headers=headers,
)
pr = r.json()
assert 200 <= r.status_code < 300, pr
assert 200 <= r.status_code < 300, r.text
return PR(self, pr['number'])
return PR(self, r.json()['number'])
def post_status(self, ref, status, context='default', **kw):
assert self.hook
@ -795,7 +979,7 @@ class Repo:
'context': context,
**kw
})
assert 200 <= r.status_code < 300, r.json()
assert 200 <= r.status_code < 300, r.text
def is_ancestor(self, sha, of):
return any(c['sha'] == sha for c in self.log(of))
@ -806,7 +990,7 @@ class Repo:
'https://api.github.com/repos/{}/commits'.format(self.name),
params={'sha': ref_or_sha, 'page': page}
)
assert 200 <= r.status_code < 300, r.json()
assert 200 <= r.status_code < 300, r.text
yield from r.json()
if not r.links.get('next'):
return
@ -874,7 +1058,7 @@ class PR:
'https://api.github.com/repos/{}/pulls/{}'.format(self.repo.name, self.number),
headers=caching
)
assert r.ok, r.json()
assert r.ok, r.text
if r.status_code == 304:
return previous
contents, caching = self._cache = r.json(), {}
@ -919,7 +1103,7 @@ class PR:
@property
def comments(self):
r = self.repo._session.get('https://api.github.com/repos/{}/issues/{}/comments'.format(self.repo.name, self.number))
assert 200 <= r.status_code < 300, r.json()
assert 200 <= r.status_code < 300, r.text
return [Comment(c) for c in r.json()]
@property
@ -936,7 +1120,7 @@ class PR:
json={'body': body},
headers=headers,
)
assert 200 <= r.status_code < 300, r.json()
assert 200 <= r.status_code < 300, r.text
return r.json()['id']
def edit_comment(self, cid, body, token=None):
@ -949,7 +1133,7 @@ class PR:
json={'body': body},
headers=headers
)
assert 200 <= r.status_code < 300, r.json()
assert 200 <= r.status_code < 300, r.text
wait_for_hook()
def delete_comment(self, cid, token=None):
@ -961,7 +1145,7 @@ class PR:
'https://api.github.com/repos/{}/issues/comments/{}'.format(self.repo.name, cid),
headers=headers
)
assert r.status_code == 204, r.json()
assert r.status_code == 204, r.text
def _set_prop(self, prop, value, token=None):
assert self.repo.hook
@ -985,7 +1169,7 @@ class PR:
self.repo.name,
self.number,
))
assert 200 <= r.status_code < 300, r.json()
assert 200 <= r.status_code < 300, r.text
info = r.json()
repo = self.repo
@ -1006,7 +1190,7 @@ class PR:
json={'body': body, 'event': state,},
headers=headers
)
assert 200 <= r.status_code < 300, r.json()
assert 200 <= r.status_code < 300, r.text
PRBranch = collections.namedtuple('PRBranch', 'repo branch')
class LabelsProxy(collections.abc.MutableSet):
@ -1017,7 +1201,7 @@ class LabelsProxy(collections.abc.MutableSet):
def _labels(self):
pr = self._pr
r = pr.repo._session.get('https://api.github.com/repos/{}/issues/{}/labels'.format(pr.repo.name, pr.number))
assert r.ok, r.json()
assert r.ok, r.text
return {label['name'] for label in r.json()}
def __repr__(self):
@ -1043,14 +1227,14 @@ class LabelsProxy(collections.abc.MutableSet):
r = pr.repo._session.post('https://api.github.com/repos/{}/issues/{}/labels'.format(pr.repo.name, pr.number), json={
'labels': [label]
})
assert r.ok, r.json()
assert r.ok, r.text
def discard(self, label):
pr = self._pr
assert pr.repo.hook
r = pr.repo._session.delete('https://api.github.com/repos/{}/issues/{}/labels/{}'.format(pr.repo.name, pr.number, label))
# discard should do nothing if the item didn't exist in the set
assert r.ok or r.status_code == 404, r.json()
assert r.ok or r.status_code == 404, r.text
def update(self, *others):
pr = self._pr
@ -1059,14 +1243,13 @@ class LabelsProxy(collections.abc.MutableSet):
r = pr.repo._session.post('https://api.github.com/repos/{}/issues/{}/labels'.format(pr.repo.name, pr.number), json={
'labels': list(set(itertools.chain.from_iterable(others)))
})
assert r.ok, r.json()
assert r.ok, r.text
class Environment:
def __init__(self, port, db, default_crons=()):
def __init__(self, port, db):
self._uid = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/common'.format(port)).authenticate(db, 'admin', 'admin', {})
self._object = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/object'.format(port))
self._db = db
self._default_crons = default_crons
def __call__(self, model, method, *args, **kwargs):
return self._object.execute_kw(
@ -1078,17 +1261,30 @@ class Environment:
def __getitem__(self, name):
return Model(self, name)
def ref(self, xid, raise_if_not_found=True):
model, obj_id = self(
'ir.model.data', 'check_object_reference',
*xid.split('.', 1),
raise_on_access_error=raise_if_not_found
)
return Model(self, model, [obj_id]) if obj_id else None
def run_crons(self, *xids, **kw):
crons = xids or self._default_crons
print('running crons', crons, file=sys.stderr)
crons = xids or ['runbot_merge.check_linked_prs_status']
cron_ids = []
for xid in crons:
t0 = time.time()
print('\trunning cron', xid, '...', file=sys.stderr)
if xid is None:
continue
model, cron_id = self('ir.model.data', 'check_object_reference', *xid.split('.', 1))
assert model == 'ir.cron', "Expected {} to be a cron, got {}".format(xid, model)
self('ir.cron', 'method_direct_trigger', [cron_id], **kw)
print('\tdone %.3fs' % (time.time() - t0), file=sys.stderr)
print('done', file=sys.stderr)
cron_ids.append(cron_id)
if cron_ids:
self('ir.cron', 'write', cron_ids, {
'nextcall': (datetime.datetime.utcnow() - datetime.timedelta(seconds=30)).isoformat(" ", "seconds")
}, **kw)
self('base', 'run_crons', [], **kw)
# sleep for some time as a lot of crap may have happened (?)
wait_for_hook()
@ -1117,6 +1313,9 @@ class Model:
def __len__(self):
return len(self._ids)
def __hash__(self):
return hash((self._model, frozenset(self._ids)))
def __eq__(self, other):
if not isinstance(other, Model):
return NotImplemented
@ -1144,9 +1343,13 @@ class Model:
# because sorted is not xmlrpc-compatible (it doesn't downgrade properly)
def sorted(self, field):
rs = self.read([field])
rs.sort(key=lambda r: r[field])
return Model(self._env, self._model, [r['id'] for r in rs])
fn = field if callable(field) else lambda r: r[field]
return Model(self._env, self._model, (
id
for record in sorted(self, key=fn)
for id in record.ids
))
def __getitem__(self, index):
if isinstance(index, str):


@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
{
'name': 'forward port bot',
'version': '1.2',
'version': '1.4',
'summary': "A port which forward ports successful PRs.",
'depends': ['runbot_merge'],
'data': [


@ -0,0 +1,10 @@
IMP: outstandings page
- increased time-before-outstanding from 3 to 7 days, as 3~4 days is common in
normal operations, especially when merging from very low branches where
forward-porting may take a while
- improved performance by optimising fetching & filtering
- added counts to the main listing for clarity (instead of hiding them in a
popover)
- added the *original authors* for the outstanding forward ports
- added ability to filter by team, if such are configured


@ -1,7 +1,14 @@
import collections
import datetime
import pathlib
import werkzeug.urls
from odoo.http import route, request
from odoo.osv import expression
from odoo.addons.runbot_merge.controllers.dashboard import MergebotDashboard
DEFAULT_DELTA = datetime.timedelta(days=7)
class Dashboard(MergebotDashboard):
def _entries(self):
changelog = pathlib.Path(__file__).parent / 'changelog'
@ -13,3 +20,81 @@ class Dashboard(MergebotDashboard):
for d in changelog.iterdir()
]
@route('/forwardport/outstanding', type='http', methods=['GET'], auth="user", website=True, sitemap=False)
def outstanding(self, partner=0, authors=True, reviewers=True, group=0):
Partners = request.env['res.partner']
PullRequests = request.env['runbot_merge.pull_requests']
partner = Partners.browse(int(partner))
group = Partners.browse(int(group))
authors = int(authors)
reviewers = int(reviewers)
link = lambda **kw: '?' + werkzeug.urls.url_encode({'partner': partner.id or 0, 'authors': authors, 'reviewers': reviewers, **kw, })
groups = Partners.search([('is_company', '=', True), ('child_ids', '!=', False)])
if not (authors or reviewers):
return request.render('forwardport.outstanding', {
'authors': 0,
'reviewers': 0,
'single': partner,
'culprits': partner,
'groups': groups,
'current_group': group,
'outstanding': [],
'outstanding_per_author': {partner: 0},
'outstanding_per_reviewer': {partner: 0},
'link': link,
})
source_filter = [('merge_date', '<', datetime.datetime.now() - DEFAULT_DELTA)]
partner_filter = []
if partner or group:
if partner:
suffix = ''
arg = partner.id
else:
suffix = '.commercial_partner_id'
arg = group.id
if authors:
partner_filter.append([(f'author{suffix}', '=', arg)])
if reviewers:
partner_filter.append([(f'reviewed_by{suffix}', '=', arg)])
source_filter.extend(expression.OR(partner_filter))
outstanding = PullRequests.search([
('state', 'in', ['opened', 'validated', 'approved', 'ready', 'error']),
('source_id', 'in', PullRequests._search(source_filter)),
])
outstanding_per_group = collections.Counter()
outstanding_per_author = collections.Counter()
outstanding_per_reviewer = collections.Counter()
outstandings = []
for source in outstanding.mapped('source_id').sorted('merge_date'):
prs = source.forwardport_ids.filtered(lambda p: p.state not in ['merged', 'closed'])
outstandings.append({
'source': source,
'prs': prs,
})
if authors:
outstanding_per_author[source.author] += len(prs)
outstanding_per_group[source.author.commercial_partner_id] += len(prs)
if reviewers and source:
outstanding_per_reviewer[source.reviewed_by] += len(prs)
outstanding_per_group[source.reviewed_by.commercial_partner_id] += len(prs)
culprits = Partners.browse(p.id for p, _ in (outstanding_per_reviewer + outstanding_per_author).most_common())
return request.render('forwardport.outstanding', {
'authors': authors,
'reviewers': reviewers,
'single': partner,
'culprits': culprits,
'groups': groups,
'current_group': group,
'outstanding_per_author': outstanding_per_author,
'outstanding_per_reviewer': outstanding_per_reviewer,
'outstanding_per_group': outstanding_per_group,
'outstanding': outstandings,
'link': link,
})


@ -4,10 +4,11 @@
<field name="model_id" ref="model_forwardport_batches"/>
<field name="state">code</field>
<field name="code">model._process()</field>
<field name="interval_number">1</field>
<field name="interval_type">minutes</field>
<field name="interval_number">6</field>
<field name="interval_type">hours</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">43</field>
</record>
<record model="ir.cron" id="updates">
@ -15,10 +16,11 @@
<field name="model_id" ref="model_forwardport_updates"/>
<field name="state">code</field>
<field name="code">model._process()</field>
<field name="interval_number">1</field>
<field name="interval_type">minutes</field>
<field name="interval_number">6</field>
<field name="interval_type">hours</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">46</field>
</record>
<record model="ir.cron" id="reminder">
@ -37,22 +39,9 @@
<field name="model_id" ref="model_forwardport_branch_remover"/>
<field name="state">code</field>
<field name="code">model._process()</field>
<field name="interval_number">1</field>
<field name="interval_number">6</field>
<field name="interval_type">hours</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
</record>
<record model="ir.cron" id="maintenance">
<field name="name">Maintenance of repo cache</field>
<field name="model_id" ref="model_forwardport_maintenance"/>
<field name="state">code</field>
<field name="code">model._run()</field>
<!-- run sunday morning as it can take a while, unlikely someone will need to forward-port stuff at that point -->
<field name="nextcall" eval="datetime.utcnow() + relativedelta(weekday=6, hour=2, minute=0, second=0, microsecond=0)"/>
<field name="interval_number">1</field>
<field name="interval_type">weeks</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
</record>
</odoo>


@ -43,13 +43,4 @@
<field name="perm_write">0</field>
<field name="perm_unlink">0</field>
</record>
<record id="access_forwardport_maintenance" model="ir.model.access">
<field name="name">Access to maintenance is useless</field>
<field name="model_id" ref="model_forwardport_maintenance"/>
<field name="perm_read">0</field>
<field name="perm_create">0</field>
<field name="perm_write">0</field>
<field name="perm_unlink">0</field>
</record>
</odoo>


@ -10,13 +10,12 @@
<xpath expr="//div[@id='alerts']" position="inside">
<t t-if="env['runbot_merge.pull_requests'].check_access_rights('read', False)">
<t t-set="outstanding" t-value="env['runbot_merge.pull_requests'].search_count([
('source_id', '!=', False),
('state', 'not in', ['merged', 'closed']),
('source_id.merge_date', '&lt;', datetime.datetime.now() - relativedelta(days=3)),
('state', 'in', ['opened', 'validated', 'approved', 'ready', 'error']),
('source_id.merge_date', '&lt;', datetime.datetime.now() - relativedelta(days=7)),
])"/>
<div t-if="outstanding != 0" class="alert col-md-12 alert-warning mb-0">
<a href="/forwardport/outstanding">
<t t-esc="outstanding"/> outstanding forward-ports
<t t-esc="outstanding"/> outstanding forward-ports (>1 week)
</a>
</div>
</t>
@ -30,67 +29,108 @@
<t t-else="">bg-warning</t>
</template>
<record id="forwardport.outstanding_fp" model="website.page">
<field name="name">Outstanding forward ports</field>
<field name="type">qweb</field>
<field name="url">/forwardport/outstanding</field>
<field name="website_indexed" eval="False"/>
<field name="is_published">True</field>
<field name="key">forwardport.outstanding_fp</field>
<field name="arch" type="xml">
<t name="Outstanding forward ports" t-name="forwardport.outstanding_fp">
<t t-call="website.layout">
<t t-set="hof" t-value="env['runbot_merge.pull_requests']._hall_of_shame()"/>
<div id="wrap" class="oe_structure oe_empty"><div class="container-fluid">
<ul class="alert bg-light list-inline">
<span t-foreach="hof.reviewers" t-as="count" class="list-inline-item">
<a t-attf-href="?reviewer={{count[0].id}}"
t-field="count[0].display_name"
t-att-title="count[1]"
/>
</span>
</ul>
<h1>List of pull requests with outstanding forward ports</h1>
<t t-set="reviewer" t-value="env['res.partner'].browse(int(request.params.get('reviewer') or 0))"/>
<form method="get" action="" id="reset-filter"/>
<h2 t-if="reviewer" class="text-muted">
merged by <span t-field="reviewer.display_name" t-attf-title="@{{reviewer.github_login}}"/>
<button form="reset-filter" type="submit"
name="reviewer" value=""
title="See All" class="btn fa fa-times"/>
</h2>
<dl><t t-foreach="hof.outstanding" t-as="x">
<t t-set="source" t-value="x[0]"/>
<t t-if="not reviewer or source.reviewed_by == reviewer">
<dt>
<a t-att-href="source.url"><span t-field="source.display_name"/></a>
by <span t-field="source.author.display_name"
t-attf-title="@{{source.author.github_login}}"/>
merged <span t-field="source.merge_date"
t-options="{'widget': 'relative'}"
t-att-title="source.merge_date"/>
<t t-if="not reviewer">
by <span t-field="source.reviewed_by.display_name"
t-attf-title="@{{source.reviewed_by.github_login}}"/>
</t>
</dt>
<dd>
Outstanding forward-ports:
<ul>
<li t-foreach="x.prs" t-as="p">
<a t-att-href="p.url"><span t-field="p.display_name"/></a>
(<span t-field="p.state"/>)
targeting <span t-field="p.target.name"/>
</li>
</ul>
</dd>
<template id="outstanding" name="Outstanding forward ports">
<t t-call="website.layout">
<div id="wrap" class="oe_structure oe_empty"><div class="container-fluid">
<div class="alert bg-light outstanding-partners">
<t t-foreach="groups" t-as="group">
<span>
<t t-if="group == current_group">
<span class="bg-primary">
<t t-out="group.display_name"/>
(<t t-out="outstanding_per_group[group]"/>)
</span>
<a t-att-href="link()" class="btn fa fa-times p-0"/>
</t>
</t></dl>
</div></div>
<t t-else="">
<a t-att-href="link(group=group.id, partner=0)">
<t t-out="group.display_name"/>
(<t t-out="outstanding_per_group[group]"/>)
</a>
</t>
</span>
</t>
</div>
<div class="alert bg-light outstanding-partners">
<t t-foreach="culprits" t-as="culprit">
<t t-set="approved" t-value="outstanding_per_reviewer[culprit]"/>
<t t-set="created" t-value="outstanding_per_author[culprit]"/>
<a t-att-href="link(partner=culprit.id)"
t-attf-title="approved {{approved}}, created {{created}}"
t-att-class="'bg-primary' if culprit == env.user.partner_id else None"
><t t-out="culprit.name"/>:
<t t-if="approved" t-out="approved"/>
<t t-if="approved and created"> + </t>
<t t-if="created" t-out="created"/>
</a>
</t>
</div>
<t t-if="not single">
by
<span class="btn-group btn-group-toggle">
<a t-att-href="link(authors=1, reviewers=1)"
t-attf-class="btn btn-sm btn-secondary {{'active' if authors and reviewers else ''}}">
both
</a>
<a t-att-href="link(authors=1, reviewers=0)"
t-attf-class="btn btn-sm btn-secondary {{'active' if authors and not reviewers else ''}}">
creators
</a>
<a t-att-href="link(reviewers=1, authors=0)"
t-attf-class="btn btn-sm btn-secondary {{'active' if reviewers and not authors else ''}}">
reviewers
</a>
</span>
</t>
</t>
</field>
</record>
<h1>List of pull requests with outstanding forward ports</h1>
<h2 t-if="single">
for <span t-field="single.display_name" t-attf-title="@{{single.github_login}}"/>
<a t-att-href="link(partner=0)" title="All Users" class="btn fa fa-times"/>
<span class="btn-group btn-group-toggle">
<a t-att-href="link(authors=1, reviewers=1)"
t-attf-class="btn btn-sm btn-secondary {{'active' if authors and reviewers else ''}}">
both
</a>
<a t-att-href="link(authors=1, reviewers=0)"
t-attf-class="btn btn-sm btn-secondary {{'active' if authors and not reviewers else ''}}">
created
</a>
<a t-att-href="link(reviewers=1, authors=0)"
t-attf-class="btn btn-sm btn-secondary {{'active' if reviewers and not authors else ''}}">
reviewed
</a>
</span>
</h2>
<dl><t t-foreach="outstanding" t-as="x">
<t t-set="source" t-value="x['source']"/>
<t t-if="not single or source.reviewed_by == single or source.author == single">
<dt>
<a t-att-href="source.url"><span t-field="source.display_name"/></a>
created by <span t-field="source.author.display_name"
t-attf-title="@{{source.author.github_login}}"/>
merged <span t-field="source.merge_date"
t-options="{'widget': 'relative'}"
t-att-title="source.merge_date"/>
by <span t-field="source.reviewed_by.display_name"
t-attf-title="@{{source.reviewed_by.github_login}}"/>
</dt>
<dd>
Outstanding forward-ports:
<ul>
<li t-foreach="x['prs']" t-as="p">
<a t-att-href="p.url"><span t-field="p.display_name"/></a>
(<span t-field="p.state"/>)
targeting <span t-field="p.target.name"/>
</li>
</ul>
</dd>
</t>
</t></dl>
</div></div>
</t>
</template>
<template id="view_pull_request" inherit_id="runbot_merge.view_pull_request">
<xpath expr="//dl[hasclass('runbot-merge-fields')]" position="inside">
@ -103,31 +143,6 @@
t-attf-title="@{{pr.reviewed_by.github_login}}"/>
</dd>
</t>
<t t-if="pr.source_id">
<dt>forward-port of</dt>
<dd>
<a t-att-href="pr.source_id.url">
<span t-field="pr.source_id.display_name"/>
</a>
<span t-if="not pr.parent_id"
class="badge text-bg-danger user-select-none"
title="A detached PR behaves like a non-forward-port, it has to be approved via the mergebot, this is usually caused by the forward-port having been in conflict or updated.">
DETACHED (<span t-out="pr.detach_reason" style="white-space: pre-wrap;"/>)
</span>
</dd>
</t>
<t t-if="pr.forwardport_ids">
<dt>forward-ports</dt>
<dd><ul>
<t t-foreach="pr.forwardport_ids" t-as="p">
<t t-set="bgsignal"><t t-call="forwardport.pr_background"/></t>
<li t-att-class="bgsignal">
<a t-att-href="p.url"><span t-field="p.display_name"/></a>
targeting <span t-field="p.target.name"/>
</li>
</t>
</ul></dd>
</t>
</xpath>
</template>
@ -142,8 +157,7 @@
<field string="Token" name="fp_github_token"/>
</group>
<group>
<field string="Bot Name" name="fp_github_name"/>
<field string="Bot Email" name="fp_github_email"/>
<field string="Bot Name" name="fp_github_name" readonly="0"/>
</group>
</group>
</xpath>
@ -152,12 +166,6 @@
help="Repository where forward port branches will be created"
/>
</xpath>
<xpath expr="//field[@name='branch_ids']/tree" position="inside">
<field name="fp_target" string="FP to"
help="This branch will be forward-ported to (from lower ones)"
/>
</xpath>
</field>
</record>
@ -173,37 +181,4 @@
</field>
</record>
<record model="ir.ui.view" id="pr">
<field name="name">Show forwardport PR fields</field>
<field name="inherit_id" ref="runbot_merge.runbot_merge_form_prs"/>
<field name="model">runbot_merge.pull_requests</field>
<field name="arch" type="xml">
<xpath expr="//field[@name='state']" position="after">
<field name="merge_date" attrs="{'invisible': [('state', '!=', 'merged')]}"/>
</xpath>
<xpath expr="//sheet/group[2]" position="after">
<separator string="Forward Port" attrs="{'invisible': [('source_id', '=', False)]}"/>
<group attrs="{'invisible': [('source_id', '!=', False)]}">
<group>
<field string="Policy" name="fw_policy"/>
</group>
</group>
<group attrs="{'invisible': [('source_id', '=', False)]}">
<group>
<field string="Original PR" name="source_id"/>
</group>
<group attrs="{'invisible': [('parent_id', '=', False)]}">
<field name="parent_id"/>
</group>
<group colspan="4" attrs="{'invisible': [('parent_id', '!=', False)]}">
<field string="Detached because" name="detach_reason" readonly="1"/>
</group>
<group>
<field string="Forward ported up to" name="limit_id"/>
</group>
</group>
</xpath>
</field>
</record>
</odoo>


@ -0,0 +1,9 @@
import pathlib
from odoo.tools.appdirs import user_cache_dir
def migrate(_cr, _version):
# avoid needing to re-clone our repo unnecessarily
pathlib.Path(user_cache_dir('forwardport')).rename(
pathlib.Path(user_cache_dir('mergebot')))


@ -0,0 +1,7 @@
def migrate(cr, version):
cr.execute("ALTER TABLE runbot_merge_project DROP COLUMN IF EXISTS fp_github_email")
cr.execute("""
ALTER TABLE runbot_merge_branch
DROP COLUMN IF EXISTS fp_sequence,
DROP COLUMN IF EXISTS fp_target
""")


@ -1,20 +1,21 @@
# -*- coding: utf-8 -*-
import builtins
import logging
import pathlib
import resource
import subprocess
import uuid
import re
from contextlib import ExitStack
from datetime import datetime, timedelta
import requests
import sentry_sdk
from dateutil import relativedelta
from odoo import fields, models
from odoo.addons.runbot_merge import git
from odoo.addons.runbot_merge.github import GH
from odoo.tools.appdirs import user_cache_dir
# how long a merged PR survives
MERGE_AGE = relativedelta.relativedelta(weeks=2)
FOOTER = '\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n'
_logger = logging.getLogger(__name__)
@ -26,19 +27,34 @@ class Queue:
raise NotImplementedError
def _process(self):
for b in self.search(self._search_domain(), order='create_date, id', limit=self.limit):
skip = 0
from_clause, where_clause, params = self._search(self._search_domain(), order='create_date, id', limit=1).get_sql()
for _ in range(self.limit):
self.env.cr.execute(f"""
SELECT id FROM {from_clause}
WHERE {where_clause or "true"}
ORDER BY create_date, id
LIMIT 1 OFFSET %s
FOR UPDATE SKIP LOCKED
""", [*params, skip])
b = self.browse(self.env.cr.fetchone())
if not b:
return
try:
b._process_item()
with sentry_sdk.start_span(description=self._name):
b._process_item()
b.unlink()
self.env.cr.commit()
except Exception:
_logger.exception("Error while processing %s, skipping", b)
self.env.cr.rollback()
b._on_failure()
if b._on_failure():
skip += 1
self.env.cr.commit()
def _on_failure(self):
pass
return True
def _search_domain(self):
return []
@ -49,13 +65,25 @@ class ForwardPortTasks(models.Model, Queue):
limit = 10
batch_id = fields.Many2one('runbot_merge.batch', required=True)
batch_id = fields.Many2one('runbot_merge.batch', required=True, index=True)
source = fields.Selection([
('merge', 'Merge'),
('fp', 'Forward Port Followup'),
('insert', 'New branch port')
('insert', 'New branch port'),
('complete', 'Complete ported batches'),
], required=True)
retry_after = fields.Datetime(required=True, default='1900-01-01 01:01:01')
pr_id = fields.Many2one('runbot_merge.pull_requests')
def create(self, vals_list):
self.env.ref('forwardport.port_forward')._trigger()
return super().create(vals_list)
def write(self, vals):
if retry := vals.get('retry_after'):
self.env.ref('forwardport.port_forward')\
._trigger(fields.Datetime.to_datetime(retry))
return super().write(vals)
def _search_domain(self):
return super()._search_domain() + [
@ -68,44 +96,169 @@ class ForwardPortTasks(models.Model, Queue):
def _process_item(self):
batch = self.batch_id
newbatch = batch.prs._port_forward()
sentry_sdk.set_tag('forward-porting', batch.prs.mapped('display_name'))
if self.source == 'complete':
self._complete_batches()
return
if newbatch:
_logger.info(
"Processing %s (from %s): %s (%s) -> %s (%s)",
self.id, self.source,
batch, batch.prs,
newbatch, newbatch.prs,
)
# insert new batch in ancestry sequence unless conflict (= no parent)
if self.source == 'insert':
for pr in newbatch.prs:
if not pr.parent_id:
break
newchild = pr.search([
('parent_id', '=', pr.parent_id.id),
('id', '!=', pr.id),
])
if newchild:
newchild.parent_id = pr.id
else: # reached end of seq (or batch is empty)
newbatch = batch._port_forward()
if not newbatch: # reached end of seq (or batch is empty)
# FIXME: or configuration is fucky so doesn't want to FP (maybe should error and retry?)
_logger.info(
"Processing %s (from %s): %s (%s) -> end of the sequence",
self.id, self.source,
batch, batch.prs
"Processed %s from %s (%s) -> end of the sequence",
batch, self.source, batch.prs.mapped('display_name'),
)
batch.active = False
return
_logger.info(
"Processed %s from %s (%s) -> %s (%s)",
batch, self.source, ', '.join(batch.prs.mapped('display_name')),
newbatch, ', '.join(newbatch.prs.mapped('display_name')),
)
# insert new batch in ancestry sequence
if self.source == 'insert':
self._process_insert(batch, newbatch)
def _process_insert(self, batch, newbatch):
self.env['runbot_merge.batch'].search([
('parent_id', '=', batch.id),
('id', '!=', newbatch.id),
]).parent_id = newbatch.id
# insert new PRs in ancestry sequence unless conflict (= no parent)
for pr in newbatch.prs:
next_target = pr._find_next_target()
if not next_target:
continue
# should have one since it was inserted before another PR?
descendant = pr.search([
('target', '=', next_target.id),
('source_id', '=', pr.source_id.id),
])
# copy the reviewing of the "descendant" (even if detached) to this pr
if reviewer := descendant.reviewed_by:
pr.reviewed_by = reviewer
# replace parent_id *if not detached*
if descendant.parent_id:
descendant.parent_id = pr.id
def _complete_batches(self):
source = pr = self.pr_id
if not pr:
_logger.warning(
"Unable to complete descendants of %s (%s): no new PR",
self.batch_id,
self.batch_id.prs.mapped('display_name'),
)
return
_logger.info(
"Completing batches for descendants of %s (added %s)",
self.batch_id.prs.mapped('display_name'),
self.pr_id.display_name,
)
gh = requests.Session()
repository = pr.repository
gh.headers['Authorization'] = f'token {repository.project_id.fp_github_token}'
PullRequests = self.env['runbot_merge.pull_requests']
self.env.cr.execute('LOCK runbot_merge_pull_requests IN SHARE MODE')
# TODO: extract complete list of targets from `_find_next_target`
# so we can create all the forwardport branches, push them, and
# only then create the PR objects
# TODO: maybe do that after making forward-port WC-less, so all the
# branches can be pushed atomically at once
for descendant in self.batch_id.descendants():
target = pr._find_next_target()
if target is None:
_logger.info("Will not forward-port %s: no next target", pr.display_name)
return
if PullRequests.search_count([
('source_id', '=', source.id),
('target', '=', target.id),
('state', 'not in', ('closed', 'merged')),
]):
_logger.warning("Will not forward-port %s: already ported", pr.display_name)
return
if target != descendant.target:
self.env['runbot_merge.pull_requests.feedback'].create({
'repository': repository.id,
'pull_request': source.id,
'token_field': 'fp_github_token',
'message': """\
{pr.ping}unable to port this PR forwards due to inconsistency: goes from \
{pr.target.name} to {next_target.name} but {batch} ({batch_prs}) targets \
{batch.target.name}.
""".format(pr=pr, next_target=target, batch=descendant, batch_prs=', '.join(descendant.mapped('prs.display_name')))
})
return
ref = descendant.prs[:1].refname
# NOTE: ports the new source everywhere instead of porting each
# PR to the next step as it does not *stop* on conflict
repo = git.get_local(source.repository)
conflict, head = source._create_fp_branch(repo, target)
repo.push(git.fw_url(pr.repository), f'{head}:refs/heads/{ref}')
remote_target = repository.fp_remote_target
owner, _ = remote_target.split('/', 1)
message = source.message + f"\n\nForward-Port-Of: {pr.display_name}"
title, body = re.match(r'(?P<title>[^\n]+)\n*(?P<body>.*)', message, flags=re.DOTALL).groups()
r = gh.post(f'https://api.github.com/repos/{pr.repository.name}/pulls', json={
'base': target.name,
'head': f'{owner}:{ref}',
'title': '[FW]' + (' ' if title[0] != '[' else '') + title,
'body': body
})
if not r.ok:
_logger.warning("Failed to create forward-port PR for %s, deleting branches", pr.display_name)
# delete all the branches; this should automatically close the
# PRs if we've created any. Using the API here is probably
# simpler than going through the working copies
d = gh.delete(f'https://api.github.com/repos/{remote_target}/git/refs/heads/{ref}')
if d.ok:
_logger.info("Deleting %s:%s=success", remote_target, ref)
else:
_logger.warning("Deleting %s:%s=%s", remote_target, ref, d.text)
raise RuntimeError(f"Forwardport failure: {pr.display_name} ({r.text})")
new_pr = PullRequests._from_gh(r.json())
_logger.info("Created forward-port PR %s", new_pr)
new_pr.write({
'batch_id': descendant.id, # should already be set correctly but...
'merge_method': pr.merge_method,
'source_id': source.id,
# only link to previous PR of sequence if cherrypick passed
# FIXME: apply parenting of siblings? Apply parenting *to* siblings?
'parent_id': pr.id if not conflict else False,
'detach_reason': "{1}\n{2}".format(*conflict).strip() if conflict else None,
})
if conflict:
self.env.ref('runbot_merge.forwardport.failure.conflict')._send(
repository=pr.repository,
pull_request=pr.number,
token_field='fp_github_token',
format_args={'source': source, 'pr': pr, 'new': new_pr, 'footer': FOOTER},
)
new_pr._fp_conflict_feedback(pr, {pr: conflict})
labels = ['forwardport']
if conflict:
labels.append('conflict')
self.env['runbot_merge.pull_requests.tagging'].create({
'repository': new_pr.repository.id,
'pull_request': new_pr.number,
'tags_add': labels,
})
pr = new_pr
CONFLICT_TEMPLATE = "{ping}WARNING: the latest change ({previous.head}) triggered " \
"a conflict when updating the next forward-port " \
"({next.display_name}), and has been ignored.\n\n" \
"You will need to update this pull request differently, " \
"or fix the issue by hand on {next.display_name}."
CHILD_CONFLICT = "{ping}WARNING: the update of {previous.display_name} to " \
"{previous.head} has caused a conflict in this pull request, " \
"data may have been lost."
class UpdateQueue(models.Model, Queue):
_name = 'forwardport.updates'
_description = 'if a forward-port PR gets updated & has followups (cherrypick succeeded) the followups need to be updated as well'
@ -115,9 +268,13 @@ class UpdateQueue(models.Model, Queue):
original_root = fields.Many2one('runbot_merge.pull_requests')
new_root = fields.Many2one('runbot_merge.pull_requests')
def create(self, vals_list):
self.env.ref('forwardport.updates')._trigger()
return super().create(vals_list)
def _process_item(self):
Feedback = self.env['runbot_merge.pull_requests.feedback']
previous = self.new_root
sentry_sdk.set_tag("update-root", self.new_root.display_name)
with ExitStack() as s:
for child in self.new_root._iter_descendants():
self.env.cr.execute("""
@ -134,45 +291,39 @@ class UpdateQueue(models.Model, Queue):
self.new_root.display_name
)
if child.state in ('closed', 'merged'):
Feedback.create({
'repository': child.repository.id,
'pull_request': child.number,
'message': "%sancestor PR %s has been updated but this PR"
" is %s and can't be updated to match."
"\n\n"
"You may want or need to manually update any"
" followup PR." % (
child.ping(),
self.new_root.display_name,
child.state,
)
})
self.env.ref('runbot_merge.forwardport.updates.closed')._send(
repository=child.repository,
pull_request=child.number,
token_field='fp_github_token',
format_args={'pr': child, 'parent': self.new_root},
)
return
conflicts, working_copy = previous._create_fp_branch(
child.target, child.refname, s)
repo = git.get_local(previous.repository)
conflicts, new_head = previous._create_fp_branch(repo, child.target)
if conflicts:
_, out, err, _ = conflicts
Feedback.create({
'repository': previous.repository.id,
'pull_request': previous.number,
'message': CONFLICT_TEMPLATE.format(
ping=previous.ping(),
previous=previous,
next=child
)
})
Feedback.create({
'repository': child.repository.id,
'pull_request': child.number,
'message': CHILD_CONFLICT.format(ping=child.ping(), previous=previous, next=child)\
+ (f'\n\nstdout:\n```\n{out.strip()}\n```' if out.strip() else '')
+ (f'\n\nstderr:\n```\n{err.strip()}\n```' if err.strip() else '')
})
self.env.ref('runbot_merge.forwardport.updates.conflict.parent')._send(
repository=previous.repository,
pull_request=previous.number,
token_field='fp_github_token',
format_args={'pr': previous, 'next': child},
)
self.env.ref('runbot_merge.forwardport.updates.conflict.child')._send(
repository=child.repository,
pull_request=child.number,
token_field='fp_github_token',
format_args={
'previous': previous,
'pr': child,
'stdout': (f'\n\nstdout:\n```\n{out.strip()}\n```' if out.strip() else ''),
'stderr': (f'\n\nstderr:\n```\n{err.strip()}\n```' if err.strip() else ''),
},
)
new_head = working_copy.stdout().rev_parse(child.refname).stdout.decode().strip()
commits_count = int(working_copy.stdout().rev_list(
f'{child.target.name}..{child.refname}',
commits_count = int(repo.stdout().rev_list(
f'{child.target.name}..{new_head}',
count=True
).stdout.decode().strip())
old_head = child.head
@ -182,16 +333,11 @@ class UpdateQueue(models.Model, Queue):
# 'state': 'opened',
'squash': commits_count == 1,
})
# push the new head to the local cache: in some cases github
# doesn't propagate revisions fast enough so on the next loop we
# can't find the revision we just pushed
dummy_branch = str(uuid.uuid4())
ref = previous._get_local_directory()
working_copy.push(ref._directory, f'{new_head}:refs/heads/{dummy_branch}')
ref.branch('--delete', '--force', dummy_branch)
# then update the child's branch to the new head
working_copy.push(f'--force-with-lease={child.refname}:{old_head}',
'target', child.refname)
repo.push(
f'--force-with-lease={child.refname}:{old_head}',
git.fw_url(child.repository),
f"{new_head}:refs/heads/{child.refname}")
# committing here means github could technically trigger its
# webhook before sending a response, but committing before
@ -211,8 +357,12 @@ class DeleteBranches(models.Model, Queue):
pr_id = fields.Many2one('runbot_merge.pull_requests')
def create(self, vals_list):
self.env.ref('forwardport.remover')._trigger(datetime.now() - MERGE_AGE)
return super().create(vals_list)
def _search_domain(self):
cutoff = self.env.context.get('forwardport_merged_before') \
cutoff = getattr(builtins, 'forwardport_merged_before', None) \
or fields.Datetime.to_string(datetime.now() - MERGE_AGE)
return [('pr_id.merge_date', '<', cutoff)]
@ -270,46 +420,3 @@ class DeleteBranches(models.Model, Queue):
r.json()
)
_deleter.info('✔ deleted branch %s of PR %s', self.pr_id.label, self.pr_id.display_name)
_gc = _logger.getChild('maintenance')
def _bypass_limits():
"""Allow git to go beyond the limits set for Odoo.
On large repositories, git gc can take a *lot* of memory (especially with
`--aggressive`), if the Odoo limits are too low this can prevent the gc
from running, leading to a lack of packing and a massive amount of cruft
accumulating in the working copy.
"""
resource.setrlimit(resource.RLIMIT_AS, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
class GC(models.TransientModel):
_name = 'forwardport.maintenance'
_description = "Weekly maintenance of... cache repos?"
def _run(self):
# lock out the forward port cron to avoid concurrency issues while we're
# GC-ing it: wait until it's available, then SELECT FOR UPDATE it,
# which should prevent cron workers from running it
fp_cron = self.env.ref('forwardport.port_forward')
self.env.cr.execute("""
SELECT 1 FROM ir_cron
WHERE id = %s
FOR UPDATE
""", [fp_cron.id])
repos_dir = pathlib.Path(user_cache_dir('forwardport'))
# run on all repos with a forwardport target (~ forwardport enabled)
for repo in self.env['runbot_merge.repository'].search([('fp_remote_target', '!=', False)]):
repo_dir = repos_dir / repo.name
if not repo_dir.is_dir():
continue
_gc.info('Running maintenance on %s', repo.name)
r = subprocess.run(
['git', '--git-dir', repo_dir, 'gc', '--aggressive', '--prune=now'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
encoding='utf-8',
preexec_fn = _bypass_limits,
)
if r.returncode:
_gc.warning("Maintenance failure (status=%d):\n%s", r.returncode, r.stdout)

File diff suppressed because it is too large


@ -22,5 +22,7 @@ class FreezeWizard(models.Model):
def unlink(self):
r = super().unlink()
if not (self.env.context.get('forwardport_keep_disabled') or self.search_count([])):
self.env.ref('forwardport.port_forward').active = True
cron = self.env.ref('forwardport.port_forward')
cron.active = True
cron._trigger() # process forward ports enqueued during the freeze period
return r


@ -4,18 +4,6 @@ import re
import pytest
import requests
@pytest.fixture
def default_crons():
return [
'runbot_merge.process_updated_commits',
'runbot_merge.merge_cron',
'runbot_merge.staging_cron',
'forwardport.port_forward',
'forwardport.updates',
'runbot_merge.check_linked_prs_status',
'runbot_merge.feedback_cron',
]
# public_repo — necessary to leave comments
# admin:repo_hook — to set up hooks (duh)
# delete_repo — to cleanup repos created under a user


@ -1,4 +1,6 @@
from utils import Commit, make_basic
import re
from utils import Commit, make_basic, to_pr, seen, matches
def test_single_updated(env, config, make_repo):
@ -87,3 +89,313 @@ def test_single_updated(env, config, make_repo):
assert pr22_id.source_id == pr2_id
assert pr22_id.parent_id == pr21_id
def test_closing_during_fp(env, config, make_repo, users):
""" Closing a PR after it's been ported once should not port it further, but
the rest of the batch should carry on
"""
r1, _ = make_basic(env, config, make_repo)
r2, _ = make_basic(env, config, make_repo)
env['runbot_merge.repository'].search([]).required_statuses = 'default'
with r1, r2:
r1.make_commits('a', Commit('1', tree={'1': '0'}), ref='heads/aref')
pr1 = r1.make_pr(target='a', head='aref')
r1.post_status('aref', 'success')
pr1.post_comment('hansen r+', config['role_reviewer']['token'])
r2.make_commits('a', Commit('2', tree={'2': '0'}), ref='heads/aref')
pr2 = r2.make_pr(target='a', head='aref')
r2.post_status('aref', 'success')
pr2.post_comment('hansen r+', config['role_reviewer']['token'])
env.run_crons()
with r1, r2:
r1.post_status('staging.a', 'success')
r2.post_status('staging.a', 'success')
env.run_crons()
pr1_id = to_pr(env, pr1)
[pr1_1_id] = pr1_id.forwardport_ids
pr2_id = to_pr(env, pr2)
[pr2_1_id] = pr2_id.forwardport_ids
with r1:
r1.get_pr(pr1_1_id.number).close(config['role_user']['token'])
with r2:
r2.post_status(pr2_1_id.head, 'success')
env.run_crons()
assert env['runbot_merge.pull_requests'].search_count([]) == 5,\
"only one of the forward ports should be ported"
assert not env['runbot_merge.pull_requests'].search([('parent_id', '=', pr1_1_id.id)]),\
"the closed PR should not be ported"
assert env['runbot_merge.pull_requests'].search([('source_id', '=', pr1_id.id)]) == pr1_1_id,\
"the closed PR should not be ported"
r1_b_head = r1.commit("b")
with r2:
r2.get_pr(pr2_1_id.number).post_comment('hansen r+', config['role_reviewer']['token'])
env.run_crons()
assert not pr2_1_id.blocked
assert not pr2_1_id.batch_id.blocked
st = pr2_1_id.staging_id
assert st
with r1, r2:
r1.post_status('staging.b', 'success')
r2.post_status('staging.b', 'success')
env.run_crons()
assert st.state == 'success'
assert r1_b_head.id == r1.commit("b").id, \
"r1:b's head should not have been touched"
def test_add_pr_during_fp(env, config, make_repo, users):
""" It should be possible to add new PRs to an FP batch
"""
r1, _ = make_basic(env, config, make_repo, statuses="default")
r2, fork2 = make_basic(env, config, make_repo, statuses="default")
# needs a "d" branch
env['runbot_merge.project'].search([]).write({
'branch_ids': [(0, 0, {'name': 'd', 'sequence': 40})],
})
with r1, r2:
r1.make_ref("heads/d", r1.commit("c").id)
r2.make_ref("heads/d", r2.commit("c").id)
with r1:
r1.make_commits('a', Commit('1', tree={'1': '0'}), ref='heads/aref')
pr1_a = r1.make_pr(target='a', head='aref')
r1.post_status('aref', 'success')
pr1_a.post_comment('hansen r+', config['role_reviewer']['token'])
env.run_crons()
with r1, r2:
r1.post_status('staging.a', 'success')
r2.post_status('staging.a', 'success')
env.run_crons()
pr1_a_id = to_pr(env, pr1_a)
[pr1_b_id] = pr1_a_id.forwardport_ids
with r2, fork2:
fork2.make_commits('b', Commit('2', tree={'2': '0'}), ref=f'heads/{pr1_b_id.refname}')
pr2_b = r2.make_pr(title="B", target='b', head=f'{fork2.owner}:{pr1_b_id.refname}')
env.run_crons()
pr2_b_id = to_pr(env, pr2_b)
assert not pr1_b_id.staging_id
assert not pr2_b_id.staging_id
assert pr1_b_id.batch_id == pr2_b_id.batch_id
assert pr1_b_id.state == "opened",\
"implicit approval from forward port should have been canceled"
batch = pr2_b_id.batch_id
with r1:
r1.post_status(pr1_b_id.head, 'success')
r1.get_pr(pr1_b_id.number).post_comment('hansen r+', config['role_reviewer']['token'])
env.run_crons()
assert batch.blocked
assert pr1_b_id.blocked
with r2:
r2.post_status(pr2_b.head, "success")
pr2_b.post_comment("hansen r+", config['role_reviewer']['token'])
env.run_crons()
assert not batch.blocked
assert pr1_b_id.staging_id and pr1_b_id.staging_id == pr2_b_id.staging_id
with r1, r2:
r1.post_status('staging.b', 'success')
r2.post_status('staging.b', 'success')
env.run_crons()
def find_child(pr):
return env['runbot_merge.pull_requests'].search([
('parent_id', '=', pr.id),
])
pr1_c_id = find_child(pr1_b_id)
assert pr1_c_id
pr2_c_id = find_child(pr2_b_id)
assert pr2_c_id
with r1, r2:
r1.post_status(pr1_c_id.head, 'success')
r2.post_status(pr2_c_id.head, 'success')
env.run_crons()
assert find_child(pr1_c_id)
assert find_child(pr2_c_id)
def test_add_to_forward_ported(env, config, make_repo, users):
"""Add a new branch to an intermediate step of a fw *sequence*, either
because of skipci or because all the intermediate CI succeeded
"""
# region setup
r1, _ = make_basic(env, config, make_repo, statuses="default")
r2, fork2 = make_basic(env, config, make_repo, statuses="default")
with r1:
r1.make_commits('a', Commit('a', tree={'a': 'a'}), ref="heads/pr1")
pr1_a = r1.make_pr(target="a", head="pr1")
r1.post_status(pr1_a.head, 'success')
pr1_a.post_comment('hansen r+', config['role_reviewer']['token'])
env.run_crons()
with r1, r2:
r1.post_status('staging.a', 'success')
r2.post_status('staging.a', 'success')
env.run_crons()
# region port forward
pr1_a_id = to_pr(env, pr1_a)
pr1_b_id = pr1_a_id.forwardport_ids
assert pr1_b_id
with r1:
r1.post_status(pr1_b_id.head, 'success')
env.run_crons()
pr1_c_id = pr1_a_id.forwardport_ids - pr1_b_id
assert pr1_c_id
# endregion
# endregion
# new PR must be in fork for labels to actually match
with r2, fork2:
# branch in fork has no owner prefix, but HEAD for cross-repo PR does
fork2.make_commits("b", Commit('b', tree={'b': 'b'}), ref=f'heads/{pr1_b_id.refname}')
pr2_b = r2.make_pr(title="b", target="b", head=pr1_b_id.label)
r2.post_status(pr2_b.head, 'success')
env.run_crons()
pr2_b_id = to_pr(env, pr2_b)
assert pr2_b_id.batch_id == pr1_b_id.batch_id
assert len(pr2_b_id.forwardport_ids) == 1, \
"since the batch is already forward ported, the new PR should" \
" immediately be forward ported to match"
assert pr2_b_id.forwardport_ids.label == pr1_c_id.label
pr2_a = r1.get_pr(pr1_b_id.number)
with r1, r2:
pr2_a.post_comment('hansen r+', config['role_reviewer']['token'])
pr2_b.post_comment("hansen r+", config['role_reviewer']['token'])
env.run_crons()
with r1, r2:
r1.post_status('staging.b', 'success')
r2.post_status('staging.b', 'success')
env.run_crons()
assert pr1_b_id.state == 'merged'
assert pr2_b_id.state == 'merged'
assert len(pr2_b_id.forwardport_ids) == 1,\
"verify that pr2_b did not get forward ported again on merge"
pr2_c = r2.get_pr(pr2_b_id.forwardport_ids.number)
assert pr2_c.comments == [
seen(env, pr2_c, users),
(users['user'], '''\
@{user} this PR targets c and is the last of the forward-port chain.
To merge the full chain, use
> @hansen r+
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
'''.format_map(users)),
]
def test_add_to_forward_port_conflict(env, config, make_repo, users):
"""If a PR is added to an existing forward port sequence, and it causes
conflicts when forward ported, it should be treated similarly to an *update*
causing a conflict: the PR is still created, but it's set in conflict.
"""
# region setup
r1, _ = make_basic(env, config, make_repo, statuses="default")
r2, fork2 = make_basic(env, config, make_repo, statuses="default")
project = env['runbot_merge.project'].search([])
with r2:
r2.make_commits(
"c",
Commit("C-onflict", tree={"b": "X"}),
ref="heads/c"
)
with r1:
r1.make_commits('a', Commit('a', tree={'a': 'a'}), ref="heads/pr1")
pr1_a = r1.make_pr(target="a", head="pr1")
r1.post_status(pr1_a.head, 'success')
pr1_a.post_comment('hansen r+', config['role_reviewer']['token'])
env.run_crons()
with r1, r2:
r1.post_status('staging.a', 'success')
r2.post_status('staging.a', 'success')
env.run_crons()
# region port forward
pr1_a_id = to_pr(env, pr1_a)
pr1_b_id = pr1_a_id.forwardport_ids
assert pr1_b_id
with r1:
r1.post_status(pr1_b_id.head, 'success')
env.run_crons()
pr1_c_id = pr1_a_id.forwardport_ids - pr1_b_id
assert pr1_c_id
# endregion
# endregion
# new PR must be in fork for labels to actually match
with r2, fork2:
# branch in fork has no owner prefix, but HEAD for cross-repo PR does
fork2.make_commits("b", Commit('b', tree={'b': 'b'}), ref=f'heads/{pr1_b_id.refname}')
pr2_b = r2.make_pr(title="b", target="b", head=pr1_b_id.label)
r2.post_status(pr2_b.head, 'success')
env.run_crons()
pr2_b_id = to_pr(env, pr2_b)
assert pr2_b_id.batch_id == pr1_b_id.batch_id
pr2_c_id = pr2_b_id.forwardport_ids
assert len(pr2_c_id) == 1, \
"since the batch is already forward ported, the new PR should" \
" immediately be forward ported to match"
assert pr2_c_id.label == pr1_c_id.label
assert not pr2_c_id.parent_id, "conflict -> should be detached"
assert pr2_c_id.detach_reason
pr2_a = r1.get_pr(pr1_b_id.number)
with r1, r2:
pr2_a.post_comment('hansen r+', config['role_reviewer']['token'])
pr2_b.post_comment("hansen r+", config['role_reviewer']['token'])
env.run_crons()
with r1, r2:
r1.post_status('staging.b', 'success')
r2.post_status('staging.b', 'success')
env.run_crons()
assert pr1_b_id.state == 'merged'
assert pr2_b_id.state == 'merged'
pr2_c = r2.get_pr(pr2_c_id.number)
assert pr2_c.comments == [
seen(env, pr2_c, users),
# should have conflicts
(users['user'], """@{user} cherrypicking of pull request {previous.display_name} failed.
stdout:
```
Auto-merging b
CONFLICT (add/add): Merge conflict in b
```
Either perform the forward-port manually (and push to this branch, proceeding as usual) or close this PR (maybe?).
In the former case, you may want to edit this PR message as well.
:warning: after resolving this conflict, you will need to merge it via @{project.github_prefix}.
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
""".format(project=project, previous=pr2_b_id, **users))
]

View File

@@ -1,8 +1,11 @@
import random
import re
import time
from operator import itemgetter
from utils import make_basic, Commit, validate_all, re_matches, seen, REF_PATTERN, to_pr
import pytest
from utils import make_basic, Commit, validate_all, matches, seen, REF_PATTERN, to_pr
def test_conflict(env, config, make_repo, users):
@@ -16,7 +19,7 @@ def test_conflict(env, config, make_repo, users):
project = env['runbot_merge.project'].search([])
project.write({
'branch_ids': [
(0, 0, {'name': 'd', 'sequence': 40, 'fp_target': True})
(0, 0, {'name': 'd', 'sequence': 40})
]
})
@@ -50,6 +53,7 @@ def test_conflict(env, config, make_repo, users):
assert prc_id.state == 'opened'
p = prod.commit(p_0)
prc = prod.get_pr(prc_id.number)
c = prod.commit(prc_id.head)
assert c.author == p.author
# ignore date as we're specifically not keeping the original's
@@ -58,14 +62,36 @@ def test_conflict(env, config, make_repo, users):
assert prod.read_tree(c) == {
'f': 'c',
'g': 'a',
'h': re_matches(r'''<<<\x3c<<< HEAD
'h': matches('''<<<\x3c<<< $$
a
|||||||| parent of [\da-f]{7,}.*
||||||| $$
=======
xxx
>>>\x3e>>> [\da-f]{7,}.*
>>>\x3e>>> $$
'''),
}
assert prc.comments == [
seen(env, prc, users),
(users['user'],
f'''@{users['user']} @{users['reviewer']} cherrypicking of pull request {pra_id.display_name} failed.
stdout:
```
Auto-merging h
CONFLICT (add/add): Merge conflict in h
```
Either perform the forward-port manually (and push to this branch, proceeding as usual) or close this PR (maybe?).
In the former case, you may want to edit this PR message as well.
:warning: after resolving this conflict, you will need to merge it via @{project.github_prefix}.
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
''')
]
prb = prod.get_pr(prb_id.number)
assert prb.comments == [
seen(env, prb, users),
@@ -76,13 +102,12 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
'''),
(users['user'], """@%s @%s the next pull request (%s) is in conflict. \
You can merge the chain up to here by saying
> @%s r+
> @hansen r+
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
""" % (
users['user'], users['reviewer'],
prc_id.display_name,
project.fp_github_name
))
]
@@ -148,6 +173,94 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
'i': 'a',
}
def test_massive_conflict(env, config, make_repo):
"""If the conflict is large enough, the commit message may exceed ARG_MAX
and trigger E2BIG.
"""
# CONFLICT (modify/delete): <file> deleted in <commit> (<title>) and modified in HEAD. Version HEAD of <file> left in tree.
#
# 107 + 2 * len(filename) + len(title) per conflicting file.
# - filename: random.randbytes(10).hex() -> 20
# - title: random.randbytes(20).hex() -> 40
# -> 701 (!) files
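# (assuming Linux's MAX_ARG_STRLEN of 128KiB per argument: 131072 /
# (107 + 2*20 + 40) = 131072 / 187 ≈ 701, so the 1500 files below
# comfortably blow past the limit)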
files = []
while len(files) < 1500:
files.append(random.randbytes(10).hex())
# region setup
project = env['runbot_merge.project'].create({
'name': "thing",
'github_token': config['github']['token'],
'github_prefix': 'hansen',
'fp_github_token': config['github']['token'],
'fp_github_name': 'herbert',
'branch_ids': [
(0, 0, {'name': 'a', 'sequence': 100}),
(0, 0, {'name': 'b', 'sequence': 80}),
],
})
repo = make_repo("repo")
env['runbot_merge.events_sources'].create({'repository': repo.name})
repo_id = env['runbot_merge.repository'].create({
'project_id': project.id,
'name': repo.name,
'required_statuses': "default",
'fp_remote_target': repo.name,
'group_id': False,
})
env['res.partner'].search([
('github_login', '=', config['role_reviewer']['user'])
]).write({
'review_rights': [(0, 0, {'repository_id': repo_id.id, 'review': True})]
})
with repo:
# create branch with a ton of empty files
repo.make_commits(
None,
Commit(
random.randbytes(20).hex(),
tree=dict.fromkeys(files, "xoxo"),
),
ref='heads/a',
)
# modify all those files in the next branch
repo.make_commits(
'a',
Commit(
random.randbytes(20).hex(),
tree=dict.fromkeys(files, "content!"),
),
ref='heads/b',
)
# endregion setup
with repo:
# delete all the files (via a tree reset) to trigger modify/delete conflicts
repo.make_commits(
'a',
Commit(random.randbytes(20).hex(), tree={'a': '1'}),
Commit(random.randbytes(20).hex(), tree={'x': '1'}, reset=True),
ref='heads/change',
)
pr = repo.make_pr(target='a', head='change')
repo.post_status('refs/heads/change', 'success')
pr.post_comment('hansen rebase-ff r+', config['role_reviewer']['token'])
env.run_crons()
with repo:
repo.post_status('staging.a', 'success')
env.run_crons()
# we don't actually need more, the bug crashes the forward port entirely so
# the PR is never even created
_pra_id, _prb_id = env['runbot_merge.pull_requests'].search([], order='number')
def test_conflict_deleted(env, config, make_repo):
prod, other = make_basic(env, config, make_repo)
# remove f from b
@@ -269,6 +382,7 @@ def test_multiple_commits_same_authorship(env, config, make_repo):
assert get(c.author) == get(author)
assert get(c.committer) == get(committer)
def test_multiple_commits_different_authorship(env, config, make_repo, users, rolemap):
""" When a PR has multiple commits by different authors, the resulting
(squashed) conflict commit should have an empty email
@@ -316,11 +430,11 @@ def test_multiple_commits_different_authorship(env, config, make_repo, users, ro
c = prod.commit(pr2_id.head)
assert len(c.parents) == 1
get = itemgetter('name', 'email')
rm = rolemap['user']
assert get(c.author) == (rm['login'], ''), \
bot = pr_id.repository.project_id.fp_github_name
assert get(c.author) == (bot, ''), \
"In a multi-author PR, the squashed conflict commit should have the " \
"author set to the bot but an empty email"
assert get(c.committer) == (rm['login'], '')
assert get(c.committer) == (bot, '')
assert re.match(r'''<<<\x3c<<< HEAD
b
@@ -345,7 +459,7 @@ b
assert pr2.comments == [
seen(env, pr2, users),
(users['user'], re_matches(r'@%s @%s .*CONFLICT' % (users['user'], users['reviewer']), re.DOTALL)),
(users['user'], matches('@%s @%s $$CONFLICT' % (users['user'], users['reviewer']))),
(users['reviewer'], 'hansen r+'),
(users['user'], f"@{users['user']} @{users['reviewer']} unable to stage: "
"All commits must have author and committer email, "

View File

@@ -1,126 +1,96 @@
# -*- coding: utf-8 -*-
import collections
import time
import pytest
from utils import seen, Commit, make_basic
from utils import seen, Commit, make_basic, to_pr
Description = collections.namedtuple('Restriction', 'source limit')
def test_configure(env, config, make_repo):
""" Checks that configuring an FP limit on a PR is respected
* limits to not the latest
* limits to the current target (= no FP)
* limits to an earlier branch (???)
"""
prod, other = make_basic(env, config, make_repo)
bot_name = env['runbot_merge.project'].search([]).fp_github_name
descriptions = [
Description(source='a', limit='b'),
Description(source='b', limit='b'),
Description(source='b', limit='a'),
]
originals = []
@pytest.mark.parametrize('source,limit,count', [
pytest.param('a', 'b', 1, id='not-last'),
pytest.param('b', 'b', 0, id='current'),
pytest.param('b', 'a', 0, id='earlier'),
])
def test_configure_fp_limit(env, config, make_repo, source, limit, count, page):
prod, other = make_basic(env, config, make_repo, statuses="default")
with prod:
for i, descr in enumerate(descriptions):
[c] = prod.make_commits(
descr.source, Commit('c %d' % i, tree={str(i): str(i)}),
ref='heads/branch%d' % i,
)
pr = prod.make_pr(target=descr.source, head='branch%d'%i)
prod.post_status(c, 'success', 'legal/cla')
prod.post_status(c, 'success', 'ci/runbot')
pr.post_comment('hansen r+\n%s up to %s' % (bot_name, descr.limit), config['role_reviewer']['token'])
originals.append(pr.number)
[c] = prod.make_commits(
source, Commit('c', tree={'f': 'g'}),
ref='heads/branch',
)
pr = prod.make_pr(target=source, head='branch')
prod.post_status(c, 'success')
pr.post_comment(f'hansen r+ up to {limit}', config['role_reviewer']['token'])
env.run_crons()
with prod:
prod.post_status('staging.a', 'success', 'legal/cla')
prod.post_status('staging.a', 'success', 'ci/runbot')
prod.post_status('staging.b', 'success', 'legal/cla')
prod.post_status('staging.b', 'success', 'ci/runbot')
prod.post_status(f'staging.{source}', 'success')
env.run_crons()
# should have created a single FP PR for 0, none for 1 and none for 2
prs = env['runbot_merge.pull_requests'].search([], order='number')
assert len(prs) == 4
assert prs[-1].parent_id == prs[0]
assert prs[0].number == originals[0]
assert prs[1].number == originals[1]
assert prs[2].number == originals[2]
pr_id = to_pr(env, pr)
descendants = env['runbot_merge.pull_requests'].search([
('source_id', '=', pr_id.id)
])
assert len(descendants) == count
limit_id = env['runbot_merge.branch'].search([('name', '=', limit)])
assert pr_id.limit_id == limit_id
assert not descendants.limit_id, "descendant should not inherit the limit explicitly"
def test_self_disabled(env, config, make_repo):
""" Allow setting target as limit even if it's disabled
"""
prod, other = make_basic(env, config, make_repo)
bot_name = env['runbot_merge.project'].search([]).fp_github_name
branch_a = env['runbot_merge.branch'].search([('name', '=', 'a')])
branch_a.fp_target = False
with prod:
[c] = prod.make_commits('a', Commit('c', tree={'0': '0'}), ref='heads/mybranch')
pr = prod.make_pr(target='a', head='mybranch')
prod.post_status(c, 'success', 'legal/cla')
prod.post_status(c, 'success', 'ci/runbot')
pr.post_comment('hansen r+\n%s up to a' % bot_name, config['role_reviewer']['token'])
env.run_crons()
pr_id = env['runbot_merge.pull_requests'].search([('number', '=', pr.number)])
assert pr_id.limit_id == branch_a
# check that the basic thingie works
page(f'/{prod.name}/pull/{pr.number}.png')
with prod:
prod.post_status('staging.a', 'success', 'legal/cla')
prod.post_status('staging.a', 'success', 'ci/runbot')
if descendants:
c = env['runbot_merge.branch'].search([('name', '=', 'c')])
descendants.limit_id = c.id
assert env['runbot_merge.pull_requests'].search([]) == pr_id,\
"should not have created a forward port"
page(f'/{prod.name}/pull/{pr.number}.png')
def test_ignore(env, config, make_repo):
def test_ignore(env, config, make_repo, users):
""" Provide an "ignore" command which is equivalent to setting the limit
to target
"""
prod, other = make_basic(env, config, make_repo)
bot_name = env['runbot_merge.project'].search([]).fp_github_name
prod, _ = make_basic(env, config, make_repo, statuses="default")
branch_a = env['runbot_merge.branch'].search([('name', '=', 'a')])
with prod:
[c] = prod.make_commits('a', Commit('c', tree={'0': '0'}), ref='heads/mybranch')
pr = prod.make_pr(target='a', head='mybranch')
prod.post_status(c, 'success', 'legal/cla')
prod.post_status(c, 'success', 'ci/runbot')
pr.post_comment('hansen r+\n%s ignore' % bot_name, config['role_reviewer']['token'])
prod.post_status(c, 'success')
env.run_crons()
with prod:
pr.post_comment('hansen ignore', config['role_reviewer']['token'])
pr.post_comment('hansen r+ fw=no', config['role_reviewer']['token'])
env.run_crons()
pr_id = env['runbot_merge.pull_requests'].search([('number', '=', pr.number)])
assert pr_id.limit_id == branch_a
with prod:
prod.post_status('staging.a', 'success', 'legal/cla')
prod.post_status('staging.a', 'success', 'ci/runbot')
prod.post_status('staging.a', 'success')
env.run_crons()
assert env['runbot_merge.pull_requests'].search([]) == pr_id,\
"should not have created a forward port"
assert pr.comments == [
seen(env, pr, users),
(users['reviewer'], "hansen ignore"),
(users['reviewer'], "hansen r+ fw=no"),
(users['user'], "'ignore' is deprecated, use 'fw=no' to disable forward porting."),
(users['user'], "Forward-port disabled (via limit)."),
(users['user'], "Disabled forward-porting."),
]
@pytest.mark.parametrize('enabled', ['active', 'fp_target'])
def test_disable(env, config, make_repo, users, enabled):
def test_disable(env, config, make_repo, users):
""" Checks behaviour if the limit target is disabled:
* disable target while FP is ongoing -> skip over (and stop there so no FP)
* forward-port over a disabled branch
* request a disabled target as limit
Disabling (with respect to forward ports) can be performed by marking the
branch as !active (which also affects mergebot operations), or as
!fp_target (won't be forward-ported to).
"""
prod, other = make_basic(env, config, make_repo)
project = env['runbot_merge.project'].search([])
bot_name = project.fp_github_name
with prod:
[c] = prod.make_commits('a', Commit('c 0', tree={'0': '0'}), ref='heads/branch0')
pr = prod.make_pr(target='a', head='branch0')
prod.post_status(c, 'success', 'legal/cla')
prod.post_status(c, 'success', 'ci/runbot')
pr.post_comment('hansen r+\n%s up to b' % bot_name, config['role_reviewer']['token'])
pr.post_comment('hansen r+ up to b', config['role_reviewer']['token'])
[c] = prod.make_commits('a', Commit('c 1', tree={'1': '1'}), ref='heads/branch1')
pr = prod.make_pr(target='a', head='branch1')
@@ -133,7 +103,7 @@ def test_disable(env, config, make_repo, users, enabled):
prod.post_status('staging.a', 'success', 'legal/cla')
prod.post_status('staging.a', 'success', 'ci/runbot')
# disable branch b
env['runbot_merge.branch'].search([('name', '=', 'b')]).write({enabled: False})
env['runbot_merge.branch'].search([('name', '=', 'b')]).active = False
env.run_crons()
# should have created a single PR (to branch c, for pr 1)
@@ -141,85 +111,66 @@ def test_disable(env, config, make_repo, users, enabled):
assert p.parent_id == _1
assert p.target.name == 'c'
project.fp_github_token = config['role_other']['token']
bot_name = project.fp_github_name
with prod:
[c] = prod.make_commits('a', Commit('c 2', tree={'2': '2'}), ref='heads/branch2')
pr = prod.make_pr(target='a', head='branch2')
prod.post_status(c, 'success', 'legal/cla')
prod.post_status(c, 'success', 'ci/runbot')
pr.post_comment('hansen r+\n%s up to' % bot_name, config['role_reviewer']['token'])
pr.post_comment('%s up to b' % bot_name, config['role_reviewer']['token'])
pr.post_comment('%s up to foo' % bot_name, config['role_reviewer']['token'])
pr.post_comment('%s up to c' % bot_name, config['role_reviewer']['token'])
pr.post_comment('hansen r+ up to', config['role_reviewer']['token'])
pr.post_comment('hansen up to b', config['role_reviewer']['token'])
pr.post_comment('hansen up to foo', config['role_reviewer']['token'])
pr.post_comment('hansen up to c', config['role_reviewer']['token'])
env.run_crons()
# use a set because git webhooks delays might lead to mis-ordered
# responses and we don't care that much
assert set(pr.comments) == {
(users['reviewer'], "hansen r+\n%s up to" % bot_name),
(users['other'], "@%s please provide a branch to forward-port to." % users['reviewer']),
(users['reviewer'], "%s up to b" % bot_name),
(users['other'], "@%s branch 'b' is disabled, it can't be used as a forward port target." % users['reviewer']),
(users['reviewer'], "%s up to foo" % bot_name),
(users['other'], "@%s there is no branch 'foo', it can't be used as a forward port target." % users['reviewer']),
(users['reviewer'], "%s up to c" % bot_name),
(users['other'], "Forward-porting to 'c'."),
seen(env, pr, users),
(users['reviewer'], "hansen r+ up to"),
(users['user'], """\
@{reviewer} please provide a branch to forward-port to.
For your own safety I've ignored *everything in your entire comment*.
Currently available commands:
|command||
|-|-|
|`help`|displays this help|
|`r(eview)+`|approves the PR, if it's a forwardport also approves all non-detached parents|
|`r(eview)=<number>`|only approves the specified parents|
|`fw=no`|does not forward-port this PR|
|`fw=default`|forward-ports this PR normally|
|`fw=skipci`|does not wait for a forward-port's statuses to succeed before creating the next one|
|`up to <branch>`|only ports this PR forward to the specified branch (included)|
|`merge`|integrate the PR with a simple merge commit, using the PR description as message|
|`rebase-merge`|rebases the PR on top of the target branch then integrates with a merge commit, using the PR description as message|
|`rebase-ff`|rebases the PR on top of the target branch, then fast-forwards|
|`squash`|squashes the PR as a single commit on the target branch, using the PR description as message|
|`delegate+`|grants approval rights to the PR author|
|`delegate=<...>`|grants approval rights on this PR to the specified github users|
|`default`|stages the PR normally|
|`priority`|tries to stage this PR first, then adds `default` PRs if the staging has room|
|`alone`|stages this PR only with other PRs of the same priority|
|`cancel=staging`|automatically cancels the current staging when this PR becomes ready|
|`check`|fetches or refreshes PR metadata, resets mergebot state|
Note: this help text is dynamic and will change with the state of the PR.
""".format_map(users)),
(users['reviewer'], "hansen up to b"),
(users['user'], "@{reviewer} branch 'b' is disabled, it can't be used as a forward port target.".format_map(users)),
(users['reviewer'], "hansen up to foo"),
(users['user'], "@{reviewer} there is no branch 'foo', it can't be used as a forward port target.".format_map(users)),
(users['reviewer'], "hansen up to c"),
(users['user'], "Forward-porting to 'c'."),
}
def test_default_disabled(env, config, make_repo, users):
""" If the default limit is disabled, it should still be the default
limit but the ping message should be set on the actual last FP (to the
last non-deactivated target)
"""
prod, other = make_basic(env, config, make_repo)
branch_c = env['runbot_merge.branch'].search([('name', '=', 'c')])
branch_c.fp_target = False
with prod:
[c] = prod.make_commits('a', Commit('c', tree={'0': '0'}), ref='heads/branch0')
pr = prod.make_pr(target='a', head='branch0')
prod.post_status(c, 'success', 'legal/cla')
prod.post_status(c, 'success', 'ci/runbot')
pr.post_comment('hansen r+', config['role_reviewer']['token'])
env.run_crons()
assert env['runbot_merge.pull_requests'].search([]).limit_id == branch_c
with prod:
prod.post_status('staging.a', 'success', 'legal/cla')
prod.post_status('staging.a', 'success', 'ci/runbot')
env.run_crons()
p1, p2 = env['runbot_merge.pull_requests'].search([], order='number')
assert p1.number == pr.number
pr2 = prod.get_pr(p2.number)
cs = pr2.comments
assert len(cs) == 2
assert pr2.comments == [
seen(env, pr2, users),
(users['user'], """\
@%(user)s @%(reviewer)s this PR targets b and is the last of the forward-port chain.
To merge the full chain, say
> @%(user)s r+
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
""" % users)
]
def test_limit_after_merge(env, config, make_repo, users):
""" If attempting to set a limit (<up to>) on a PR which is merged
(already forward-ported or not), or is a forward-port PR, fwbot should
just feedback that it won't do it
"""
prod, other = make_basic(env, config, make_repo)
reviewer = config['role_reviewer']['token']
branch_b = env['runbot_merge.branch'].search([('name', '=', 'b')])
branch_c = env['runbot_merge.branch'].search([('name', '=', 'c')])
bot_name = env['runbot_merge.project'].search([]).fp_github_name
with prod:
[c] = prod.make_commits('a', Commit('c', tree={'0': '0'}), ref='heads/abranch')
pr1 = prod.make_pr(target='a', head='abranch')
@@ -234,20 +185,20 @@ def test_limit_after_merge(env, config, make_repo, users):
env.run_crons()
p1, p2 = env['runbot_merge.pull_requests'].search([], order='number')
assert p1.limit_id == p2.limit_id == branch_c, "check that limit is correctly set"
assert p1.limit_id == p2.limit_id == env['runbot_merge.branch'].browse(())
pr2 = prod.get_pr(p2.number)
with prod:
pr1.post_comment(bot_name + ' up to b', reviewer)
pr2.post_comment(bot_name + ' up to b', reviewer)
pr1.post_comment('hansen up to b', reviewer)
pr2.post_comment('hansen up to b', reviewer)
env.run_crons()
assert p1.limit_id == p2.limit_id == branch_c, \
"check that limit was not updated"
assert p1.limit_id == p2.limit_id == branch_b
assert pr1.comments == [
(users['reviewer'], "hansen r+"),
seen(env, pr1, users),
(users['reviewer'], bot_name + ' up to b'),
(bot_name, "@%s forward-port limit can only be set before the PR is merged." % users['reviewer']),
(users['reviewer'], 'hansen up to b'),
(users['user'], "Forward-porting to 'b'."),
(users['user'], f"Forward-porting to 'b' (from {p2.display_name})."),
]
assert pr2.comments == [
seen(env, pr2, users),
@@ -256,12 +207,8 @@ This PR targets b and is part of the forward-port chain. Further PRs will be cre
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
"""),
(users['reviewer'], bot_name + ' up to b'),
(bot_name, "@%s forward-port limit can only be set on an origin PR"
" (%s here) before it's merged and forward-ported." % (
users['reviewer'],
p1.display_name,
)),
(users['reviewer'], 'hansen up to b'),
(users['user'], f"Forward-porting {p1.display_name} to 'b'."),
]
# update pr2 to detach it from pr1
@@ -277,17 +224,272 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
assert p2.source_id == p1
with prod:
pr2.post_comment(bot_name + ' up to b', reviewer)
pr2.post_comment('hansen up to c', reviewer)
env.run_crons()
assert pr2.comments[4:] == [
(bot_name, "@%s @%s this PR was modified / updated and has become a normal PR. "
"It should be merged the normal way (via @%s)" % (
users['user'], users['reviewer'],
p2.repository.project_id.github_prefix
)),
(users['reviewer'], bot_name + ' up to b'),
(bot_name, f"@{users['reviewer']} forward-port limit can only be set on an origin PR "
f"({p1.display_name} here) before it's merged and forward-ported."
),
(users['user'], f"@{users['user']} @{users['reviewer']} this PR was modified / updated and has become a normal PR. It must be merged directly."),
(users['reviewer'], 'hansen up to c'),
(users['user'], "Forward-porting to 'c'."),
]
with prod:
prod.post_status(p2.head, 'success', 'legal/cla')
prod.post_status(p2.head, 'success', 'ci/runbot')
pr2.post_comment('hansen r+', reviewer)
env.run_crons()
with prod:
prod.post_status('staging.b', 'success', 'legal/cla')
prod.post_status('staging.b', 'success', 'ci/runbot')
env.run_crons()
_, _, p3 = env['runbot_merge.pull_requests'].search([], order='number')
assert p3
pr3 = prod.get_pr(p3.number)
with prod:
pr3.post_comment("hansen up to c", reviewer)
env.run_crons()
assert pr3.comments == [
seen(env, pr3, users),
(users['user'], f"""\
@{users['user']} @{users['reviewer']} this PR targets c and is the last of the forward-port chain.
To merge the full chain, use
> @hansen r+
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
"""),
(users['reviewer'], "hansen up to c"),
(users['user'], f"Forward-porting {p2.display_name} to 'c'."),
]
# 7 of previous check, plus r+
assert pr2.comments[8:] == [
(users['user'], f"Forward-porting to 'c' (from {p3.display_name}).")
]
@pytest.mark.parametrize("update_from", [
pytest.param(lambda source: [('id', '=', source)], id='source'),
pytest.param(lambda source: [('source_id', '=', source), ('target', '=', '2')], id='child'),
pytest.param(lambda source: [('source_id', '=', source), ('target', '=', '3')], id='root'),
pytest.param(lambda source: [('source_id', '=', source), ('target', '=', '4')], id='parent'),
pytest.param(lambda source: [('source_id', '=', source), ('target', '=', '5')], id='current'),
# pytest.param(id='tip'), # doesn't exist
])
@pytest.mark.parametrize("limit", range(1, 6+1))
def test_post_merge(
env, post_merge, users, config, branches,
update_from: callable,
limit: int,
):
PRs = env['runbot_merge.pull_requests']
project, prod, _ = post_merge
reviewer = config['role_reviewer']['token']
# fetch source PR
[source] = PRs.search([('source_id', '=', False)])
# validate the forward ports for "child", "root", and "parent" so "current"
# exists and we have one more target
for branch in map(str, range(2, 4+1)):
setci(source=source, repo=prod, target=branch)
env.run_crons()
# update 3 to make it into a root
root = PRs.search([('source_id', '=', source.id), ('target.name', '=', '3')])
root.write({'parent_id': False, 'detach_reason': 'testing'})
# send detach messages so they're not part of the limit stuff batch
env.run_crons()
# cheat: we know PR numbers are assigned sequentially
prs = list(map(prod.get_pr, range(1, 6)))
before = {p.number: len(p.comments) for p in prs}
from_id = PRs.search(update_from(source.id))
from_ = prod.get_pr(from_id.number)
with prod:
from_.post_comment(f'hansen up to {limit}', reviewer)
env.run_crons()
# there should always be a comment on the source and root indicating how
# far we port
# the PR we post on should have a comment indicating the correction
current_id = PRs.search([('number', '=', '5')])
actual_limit = max(limit, 5)
for p in prs:
# case for the PR on which we posted the comment
if p.number == from_.number:
root_opt = '' if p.number == root.number else f' {root.display_name}'
trailer = '' if actual_limit == limit else f" (instead of the requested '{limit}' because {current_id.display_name} already exists)"
assert p.comments[before[p.number] + 1:] == [
(users['user'], f"Forward-porting{root_opt} to '{actual_limit}'{trailer}.")
]
# case for reference PRs source and root (which get their own notifications)
elif p.number in (source.number, root.number):
assert p.comments[before[p.number]:] == [
(users['user'], f"Forward-porting to '{actual_limit}' (from {from_id.display_name}).")
]
@pytest.mark.parametrize('mode', [
None,
# last forward port should fail ci, and only be validated after target bump
'failbump',
# last forward port should fail ci, then be validated, then target bump
'failsucceed',
# last forward port should be merged before bump
'mergetip',
# every forward port should be merged before bump
'mergeall',
])
def test_resume_fw(env, post_merge, users, config, branches, mode):
"""Singleton version of test_post_merge: completes the forward porting
including validation, then tries to increase the limit, which should resume
forward porting
"""
PRs = env['runbot_merge.pull_requests']
project, prod, _ = post_merge
reviewer = config['role_reviewer']['token']
# fetch source PR
[source] = PRs.search([('source_id', '=', False)])
with prod:
prod.get_pr(source.number).post_comment('hansen up to 5', reviewer)
# validate the forward ports for "child", "root", and "parent" so "current"
# exists and we have one more target
for branch in map(str, range(2, 5+1)):
setci(
source=source, repo=prod, target=branch,
status='failure' if branch == '5' and mode in ('failbump', 'failsucceed') else 'success'
)
env.run_crons()
# cheat: we know PR numbers are assigned sequentially
prs = list(map(prod.get_pr, range(1, 6)))
before = {p.number: len(p.comments) for p in prs}
if mode == 'failsucceed':
setci(source=source, repo=prod, target=5)
# sees the success, limit is still 5, considers the porting finished
env.run_crons()
if mode and mode.startswith('merge'):
numbers = range(5 if mode == 'mergetip' else 2, 5 + 1)
with prod:
for number in numbers:
prod.get_pr(number).post_comment('hansen r+', reviewer)
env.run_crons()
with prod:
for target in numbers:
pr = PRs.search([('target.name', '=', str(target))])
prod.post_status(f'staging.{target}', 'success')
env.run_crons()
for number in numbers:
assert PRs.search([('number', '=', number)]).state == 'merged'
from_ = prod.get_pr(source.number)
with prod:
from_.post_comment('hansen up to 6', reviewer)
env.run_crons()
if mode == 'failbump':
setci(source=source, repo=prod, target=5)
# setci moved the PR from opened to validated, so *now* it can be
# forward-ported, but that still needs to actually happen
env.run_crons()
# since PR5 CI succeeded and we've increased the limit there should be a
# new PR
assert PRs.search([('source_id', '=', source.id), ('target.name', '=', 6)])
pr5_id = PRs.search([('source_id', '=', source.id), ('target.name', '=', 5)])
if mode == 'failbump':
# because the initial forward porting was never finished as the PR CI
# failed until *after* we bumped the limit, so it's not *resuming* per se.
assert prs[0].comments[before[1]+1:] == [
(users['user'], f"Forward-porting to '6'.")
]
else:
assert prs[0].comments[before[1]+1:] == [
(users['user'], f"Forward-porting to '6', resuming forward-port stopped at {pr5_id.display_name}.")
]
def setci(*, source, repo, target, status='success'):
"""Validates (CI success) the descendant of ``source`` targeting ``target``
in ``repo``.
"""
pr = source.search([('source_id', '=', source.id), ('target.name', '=', str(target))])
assert pr, f"could not find forward port of {source.display_name} to {target}"
with repo:
repo.post_status(pr.head, status)
@pytest.fixture(scope='session')
def branches():
"""Need enough branches to make space for:
- a source
- an ancestor (before and separated from the root, but not the source)
- a root (break in the parent chain)
- a parent (between "current" and root)
- "current"
- the tip branch
"""
return range(1, 6 + 1)
@pytest.fixture
def post_merge(env, config, users, make_repo, branches):
"""Create a setup for the post-merge limits test which is both simpler and
more complicated than the standard test setup(s): it doesn't need more
variety in code, but it needs a lot more "depth" in terms of number of
branches it supports. Branches are fixture-ed to make it easier to share
between this fixture and the actual test.
All the branches are set to the same commit because that basically
shouldn't matter.
"""
prod = make_repo("post-merge-test")
with prod:
[c] = prod.make_commits(None, Commit('base', tree={'f': ''}))
for i in branches:
prod.make_ref(f'heads/{i}', c)
dev = prod.fork()
proj = env['runbot_merge.project'].create({
'name': prod.name,
'github_token': config['github']['token'],
'github_prefix': 'hansen',
'fp_github_token': config['github']['token'],
'fp_github_name': 'herbert',
'branch_ids': [
(0, 0, {'name': str(i), 'sequence': 1000 - (i * 10)})
for i in branches
],
'repo_ids': [
(0, 0, {
'name': prod.name,
'required_statuses': 'default',
'fp_remote_target': dev.name,
})
]
})
env['runbot_merge.events_sources'].create({'repository': prod.name})
env['res.partner'].search([
('github_login', '=', config['role_reviewer']['user'])
]).write({
'review_rights': [(0, 0, {'repository_id': proj.repo_ids.id, 'review': True})]
})
reviewer = config['role_reviewer']['token']
# merge the source PR
source_target = str(branches[0])
with prod:
[c] = prod.make_commits(source_target, Commit('my pr', tree={'x': ''}), ref='heads/mypr')
pr1 = prod.make_pr(target=source_target, head=c, title="a title")
prod.post_status(c, 'success')
pr1.post_comment('hansen r+', reviewer)
env.run_crons()
with prod:
prod.post_status(f'staging.{source_target}', 'success')
env.run_crons()
return proj, prod, dev

View File

@@ -12,39 +12,45 @@ def test_override_inherited(env, config, make_repo, users):
"""
repo, other = make_basic(env, config, make_repo)
project = env['runbot_merge.project'].search([])
project.repo_ids.status_ids = [(5, 0, 0), (0, 0, {'context': 'default'})]
env['res.partner'].search([('github_login', '=', users['reviewer'])])\
.write({'override_rights': [(0, 0, {
'repository_id': project.repo_ids.id,
'context': 'ci/runbot',
'context': 'default',
})]})
with repo:
repo.make_commits('a', Commit('C', tree={'a': '0'}), ref='heads/change')
repo.make_commits('a', Commit('pr 1', tree={'a': '0'}), ref='heads/change')
pr = repo.make_pr(target='a', head='change')
repo.post_status('change', 'success', 'legal/cla')
pr.post_comment('hansen r+ override=ci/runbot', config['role_reviewer']['token'])
pr.post_comment('hansen r+ override=default', config['role_reviewer']['token'])
env.run_crons()
original = env['runbot_merge.pull_requests'].search([('repository.name', '=', repo.name), ('number', '=', pr.number)])
assert original.state == 'ready'
assert not original.limit_id
with repo:
repo.post_status('staging.a', 'success', 'legal/cla')
repo.post_status('staging.a', 'success', 'ci/runbot')
repo.post_status('staging.a', 'success')
env.run_crons()
pr0_id, pr1_id = env['runbot_merge.pull_requests'].search([], order='number')
pr0_id, pr1_id, pr2_id = env['runbot_merge.pull_requests'].search([], order='number')
assert pr0_id == original
assert pr1_id.parent_id, pr0_id
assert pr0_id.target.name == 'a'
with repo:
repo.post_status(pr1_id.head, 'success', 'legal/cla')
env.run_crons()
assert pr1_id.parent_id == pr0_id
assert pr1_id.number == 2
assert pr1_id.target.name == 'b'
assert pr1_id.state == 'validated'
assert statuses(pr1_id) == {'ci/runbot': 'success', 'legal/cla': 'success'}
assert statuses(pr1_id) == {'default': 'success'}
assert pr2_id.parent_id == pr1_id
assert pr2_id.target.name == 'c'
assert pr2_id.state == 'validated'
assert statuses(pr2_id) == {'default': 'success'}
# now we edit the child PR
pr_repo, pr_ref = repo.get_pr(pr1_id.number).branch
pr1 = repo.get_pr(pr1_id.number)
pr_repo, pr_ref = pr1.branch
with pr_repo:
pr_repo.make_commits(
pr1_id.target.name,
@@ -56,6 +62,12 @@ def test_override_inherited(env, config, make_repo, users):
assert pr1_id.state == 'opened'
assert not pr1_id.parent_id
assert statuses(pr1_id) == {}, "should not have any status left"
assert statuses(pr2_id) == {}
with repo:
pr1.post_comment('hansen override=default', config['role_reviewer']['token'])
assert statuses(pr1_id) == {'default': 'success'}
assert statuses(pr2_id) == {'default': 'success'}
def test_override_combination(env, config, make_repo, users):
""" A forwardport should inherit its parents' overrides, until it's edited.

View File

@@ -6,7 +6,7 @@ from datetime import datetime, timedelta
import pytest
from utils import seen, Commit, make_basic, REF_PATTERN, MESSAGE_TEMPLATE, validate_all, part_of
from utils import seen, Commit, make_basic, REF_PATTERN, MESSAGE_TEMPLATE, validate_all, part_of, to_pr, matches
FMT = '%Y-%m-%d %H:%M:%S'
FAKE_PREV_WEEK = (datetime.now() + timedelta(days=1)).strftime(FMT)
@@ -35,7 +35,6 @@ def test_straightforward_flow(env, config, make_repo, users):
other_user = config['role_other']
other_user_repo = prod.fork(token=other_user['token'])
project = env['runbot_merge.project'].search([])
b_head = prod.commit('b')
c_head = prod.commit('c')
with prod, other_user_repo:
@@ -109,7 +108,7 @@ def test_straightforward_flow(env, config, make_repo, users):
assert c.author['name'] == other_user['user'], "author should still be original's probably"
assert c.committer['name'] == other_user['user'], "committer should also still be the original's, really"
assert pr1.ping() == "@%s @%s " % (
assert pr1.ping == "@%s @%s " % (
config['role_other']['user'],
config['role_reviewer']['user'],
), "ping of forward-port PR should include author and reviewer of source"
@@ -124,7 +123,7 @@ def test_straightforward_flow(env, config, make_repo, users):
prod.post_status(pr1.head, 'success', 'legal/cla')
env.run_crons()
env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
pr0_, pr1_, pr2 = env['runbot_merge.pull_requests'].search([], order='number')
@@ -132,11 +131,15 @@ def test_straightforward_flow(env, config, make_repo, users):
(users['reviewer'], 'hansen r+ rebase-ff'),
seen(env, pr, users),
(users['user'], 'Merge method set to rebase and fast-forward.'),
(users['user'], '@%s @%s this pull request has forward-port PRs awaiting action (not merged or closed):\n%s' % (
users['other'], users['reviewer'],
'\n- '.join((pr1 | pr2).mapped('display_name'))
)),
]
pr1_remote = prod.get_pr(pr1.number)
assert pr1_remote.comments == [
seen(env, pr1_remote, users),
(users['user'], """\
This PR targets b and is part of the forward-port chain. Further PRs will be created up to c.
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
""")]
assert pr0_ == pr0
assert pr1_ == pr1
@@ -160,21 +163,25 @@ def test_straightforward_flow(env, config, make_repo, users):
@%s @%s this PR targets c and is the last of the forward-port chain containing:
* %s
To merge the full chain, say
> @%s r+
To merge the full chain, use
> @hansen r+
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
""" % (
users['other'], users['reviewer'],
pr1.display_name,
project.fp_github_name
)),
(users['user'], "@%s @%s this forward port of %s is awaiting action (not merged or closed)." % (
users['other'],
users['reviewer'],
pr0.display_name,
))
]
with prod:
prod.post_status(pr2.head, 'success', 'ci/runbot')
prod.post_status(pr2.head, 'success', 'legal/cla')
pr2_remote.post_comment('%s r+' % project.fp_github_name, config['role_reviewer']['token'])
pr2_remote.post_comment('hansen r+', config['role_reviewer']['token'])
env.run_crons()
@@ -232,7 +239,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
assert other_user_repo.get_ref(pr.ref) == p_1
# should have deleted all PR branches
pr1_ref = prod.get_pr(pr1.number).ref
pr1_ref = pr1_remote.ref
with pytest.raises(AssertionError, match='Not Found'):
other.get_ref(pr1_ref)
@@ -315,36 +322,69 @@ def test_empty(env, config, make_repo, users):
assert env['runbot_merge.pull_requests'].search([], order='number') == prs
# change FP token to see if the feedback comes from the proper user
project = env['runbot_merge.project'].search([])
project.fp_github_token = config['role_other']['token']
project.write({
'fp_github_name': False,
'fp_github_token': config['role_other']['token'],
})
assert project.fp_github_name == users['other']
# check reminder
env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
awaiting = (
users['other'],
'@%s @%s this pull request has forward-port PRs awaiting action (not merged or closed):\n%s' % (
'@%s @%s this forward port of %s is awaiting action (not merged or closed).' % (
users['user'], users['reviewer'],
fail_id.display_name
pr1_id.display_name
)
)
conflict = (users['user'], matches(
f"""@{users['user']} @{users['reviewer']} cherrypicking of pull request {pr1_id.display_name} failed.
stdout:
```
$$
```
stderr:
```
$$
```
Either perform the forward-port manually (and push to this branch, proceeding as usual) or close this PR (maybe?).
In the former case, you may want to edit this PR message as well.
:warning: after resolving this conflict, you will need to merge it via @{project.github_prefix}.
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
"""))
assert pr1.comments == [
(users['reviewer'], 'hansen r+'),
seen(env, pr1, users),
]
fail_pr = prod.get_pr(fail_id.number)
assert fail_pr.comments == [
seen(env, fail_pr, users),
conflict,
awaiting,
awaiting,
], "each cron run should trigger a new message on the ancestor"
], "each cron run should trigger a new message"
# check that this stops if we close the PR
with prod:
prod.get_pr(fail_id.number).close()
env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
fail_pr.close()
env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
assert pr1.comments == [
(users['reviewer'], 'hansen r+'),
seen(env, pr1, users),
awaiting,
awaiting,
]
assert fail_pr.comments == [
seen(env, fail_pr, users),
conflict,
awaiting,
awaiting,
], "each cron run should trigger a new message"
def test_partially_empty(env, config, make_repo):
""" Check what happens when only some commits of the PR are now empty
@@ -489,7 +529,7 @@ def test_access_rights(env, config, make_repo, users, author, reviewer, delegate
prod.post_status(pr2.head, 'success', 'ci/runbot')
prod.post_status(pr2.head, 'success', 'legal/cla')
prod.get_pr(pr2.number).post_comment(
'%s r+' % project.fp_github_name,
'hansen r+',
token=config['role_' + reviewer]['token']
)
env.run_crons()
@@ -513,6 +553,69 @@ def signoff(conf, message):
return signoff
raise AssertionError("Failed to find signoff by %s in %s" % (conf, message))
def test_disapproval(env, config, make_repo, users):
"""The author of a source PR should be able to unapprove the forward port in
case they approved it, then noticed an issue or something.
"""
# region setup
prod, _ = make_basic(env, config, make_repo, statuses='default')
env['res.partner'].create({
'name': users['other'],
'github_login': users['other'],
'email': 'other@example.org',
})
author_token = config['role_other']['token']
fork = prod.fork(token=author_token)
with prod, fork:
[c] = fork.make_commits('a', Commit('c_0', tree={'y': '0'}), ref='heads/accessrights')
pr0 = prod.make_pr(
target='a', title='my change',
head=users['other'] + ':accessrights',
token=author_token,
)
prod.post_status(c, 'success')
pr0.post_comment('hansen r+', token=config['role_reviewer']['token'])
env.run_crons()
with prod:
prod.post_status('staging.a', 'success')
env.run_crons()
pr0_id, pr1_id = env['runbot_merge.pull_requests'].search([], order='number')
assert pr1_id.source_id == pr0_id
pr1 = prod.get_pr(pr1_id.number)
assert pr0_id.state == 'merged'
with prod:
prod.post_status(pr1_id.head, 'success')
env.run_crons()
# endregion
_, _, pr2_id = env['runbot_merge.pull_requests'].search([], order='number')
pr2 = prod.get_pr(pr2_id.number)
with prod:
prod.post_status(pr2_id.head, 'success')
pr2.post_comment('hansen r+', token=config['role_other']['token'])
# no point creating staging for our needs, just propagate statuses
env.run_crons(None)
assert pr1_id.state == 'ready'
assert pr2_id.state == 'ready'
# oh no, pr1 has an error!
with prod:
pr1.post_comment('hansen r-', token=config['role_other']['token'])
env.run_crons(None)
assert pr1_id.state == 'validated', "pr1 should not be approved anymore"
assert pr2_id.state == 'ready', "pr2 should not be affected"
assert pr1.comments == [
seen(env, pr1, users),
(users['user'], 'This PR targets b and is part of the forward-port chain. Further PRs will be created up to c.\n\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n'),
(users['other'], "hansen r-"),
(users['user'], "Note that only this forward-port has been unapproved, "
"sibling forward ports may have to be unapproved "
"individually."),
]
def test_delegate_fw(env, config, make_repo, users):
"""If a user is delegated *on a forward port* they should be able to approve
@@ -582,8 +685,8 @@ def test_delegate_fw(env, config, make_repo, users):
seen(env, pr2, users),
(users['user'], '''@{self_reviewer} @{reviewer} this PR targets c and is the last of the forward-port chain.
To merge the full chain, say
> @{user} r+
To merge the full chain, use
> @hansen r+
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
'''.format_map(users)),
@@ -626,7 +729,7 @@ def test_redundant_approval(env, config, make_repo, users):
with prod:
pr1.post_comment('hansen r+', config['role_reviewer']['token'])
with prod:
pr2.post_comment(f'{project.fp_github_name} r+', config['role_reviewer']['token'])
pr2.post_comment('hansen r+', config['role_reviewer']['token'])
env.run_crons()
assert pr1.comments == [
@@ -738,7 +841,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
# ok main1 PRs
with main1:
validate_all([main1], [pr1c.head])
main1.get_pr(pr1c.number).post_comment('%s r+' % project.fp_github_name, config['role_reviewer']['token'])
main1.get_pr(pr1c.number).post_comment('hansen r+', config['role_reviewer']['token'])
env.run_crons()
# check that the main1 PRs are ready but blocked on the main2 PRs
@@ -750,7 +853,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
# ok main2 PRs
with main2:
validate_all([main2], [pr2c.head])
main2.get_pr(pr2c.number).post_comment('%s r+' % project.fp_github_name, config['role_reviewer']['token'])
main2.get_pr(pr2c.number).post_comment('hansen r+', config['role_reviewer']['token'])
env.run_crons()
env['runbot_merge.stagings'].search([]).mapped('target.display_name')
@@ -796,7 +899,7 @@ class TestClosing:
prod.post_status(pr1_id.head, 'success', 'legal/cla')
prod.post_status(pr1_id.head, 'success', 'ci/runbot')
env.run_crons()
env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron')
env.run_crons('forwardport.reminder')
assert env['runbot_merge.pull_requests'].search([], order='number') == pr0_id | pr1_id,\
"closing the PR should suppress the FP sequence"
@@ -858,27 +961,86 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
with prod:
pr1.open()
assert pr1_id.state == 'validated'
env.run_crons()
assert pr1.comments[-1] == (
users['user'],
"@{} @{} this PR was closed then reopened. "
"It should be merged the normal way (via @{})".format(
users['user'],
users['reviewer'],
project.github_prefix,
)
)
assert not pr1_id.parent_id
assert not pr2_id.parent_id
with prod:
pr1.post_comment(f'{project.fp_github_name} r+', config['role_reviewer']['token'])
def test_close_disabled(self, env, make_repo, users, config):
""" If an fwport's target is disabled and its branch is closed, it
should not be notified (multiple times); its descendant should also not
be notified if already merged; and there should not be recursive
notifications (odoo/odoo#145969, odoo/odoo#145984)
"""
repo, _ = make_basic(env, config, make_repo)
env['runbot_merge.repository'].search([]).required_statuses = 'default'
# prep: merge PR, create two forward ports
with repo:
[c1] = repo.make_commits('a', Commit('first', tree={'m': 'c1'}))
pr1 = repo.make_pr(title='title', body='body', target='a', head=c1)
pr1.post_comment('hansen r+', config['role_reviewer']['token'])
repo.post_status(c1, 'success')
env.run_crons()
assert pr1.comments[-1] == (
users['user'],
"@{} I can only do this on unmodified forward-port PRs, ask {}.".format(
users['reviewer'],
project.github_prefix,
),
)
pr1_id = to_pr(env, pr1)
assert pr1_id.state == 'ready', pr1_id.blocked
with repo:
repo.post_status('staging.a', 'success')
env.run_crons()
pr1_id_, pr2_id = env['runbot_merge.pull_requests'].search([], order='number')
assert pr1_id_ == pr1_id
with repo:
repo.post_status(pr2_id.head, 'success')
env.run_crons()
_, _, pr3_id = env['runbot_merge.pull_requests'].search([], order='number')
# disable second branch
pr2_id.target.active = False
env.run_crons()
pr2 = repo.get_pr(pr2_id.number)
assert pr2.comments == [
seen(env, pr2, users),
(users['user'], "This PR targets b and is part of the forward-port chain. "
"Further PRs will be created up to c.\n\n"
"More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n"),
(users['user'], "@{user} @{reviewer} the target branch 'b' has been disabled, you may want to close this PR.".format_map(
users
)),
]
pr3 = repo.get_pr(pr3_id.number)
assert pr3.comments == [
seen(env, pr3, users),
(users['user'], """\
@{user} @{reviewer} this PR targets c and is the last of the forward-port chain containing:
* {pr2_id.display_name}
To merge the full chain, use
> @hansen r+
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
""".format(pr2_id=pr2_id, **users)),
]
# some time later, notice PR3 is open and merge it
with repo:
pr3.post_comment('hansen r+', config['role_reviewer']['token'])
repo.post_status(pr3.head, 'success')
env.run_crons()
with repo:
repo.post_status('staging.c', 'success')
env.run_crons()
assert pr3_id.status == 'success'
# even later, notice PR2 is still open but not mergeable anymore
with repo:
pr2.close()
env.run_crons()
assert pr2.comments[3:] == []
assert pr3.comments[2:] == [(users['reviewer'], "hansen r+")]
class TestBranchDeletion:
def test_delete_normal(self, env, config, make_repo):
@@ -981,50 +1143,44 @@ class TestRecognizeCommands:
('number', '=', pr.number),
])
# FIXME: remove / merge into mergebot tests
def test_botname_casing(self, env, config, make_repo):
""" Test that the botname is case-insensitive as people might write
bot names capitalised or titlecased or uppercased or whatever
"""
repo, pr, pr_id = self.make_pr(env, config, make_repo)
assert pr_id.state == 'opened'
botname = env['runbot_merge.project'].search([]).fp_github_name
[a] = env['runbot_merge.branch'].search([
('name', '=', 'a')
])
[c] = env['runbot_merge.branch'].search([
('name', '=', 'c')
])
names = [
botname,
botname.upper(),
botname.capitalize(),
sPeNgBaB(botname),
"hansen",
"HANSEN",
"Hansen",
sPeNgBaB("hansen"),
]
for n in names:
assert pr_id.limit_id == c
assert not pr_id.limit_id
with repo:
pr.post_comment('@%s up to a' % n, config['role_reviewer']['token'])
pr.post_comment(f'@{n} up to a', config['role_reviewer']['token'])
assert pr_id.limit_id == a
# reset state
pr_id.write({'limit_id': c.id})
pr_id.limit_id = False
# FIXME: remove / merge into mergebot tests
@pytest.mark.parametrize('indent', ['', '\N{SPACE}', '\N{SPACE}'*4, '\N{TAB}'])
def test_botname_indented(self, env, config, make_repo, indent):
""" matching botname should ignore leading whitespaces
"""
repo, pr, pr_id = self.make_pr(env, config, make_repo)
assert pr_id.state == 'opened'
botname = env['runbot_merge.project'].search([]).fp_github_name
[a] = env['runbot_merge.branch'].search([
('name', '=', 'a')
])
[c] = env['runbot_merge.branch'].search([
('name', '=', 'c')
])
assert pr_id.limit_id == c
assert not pr_id.limit_id
with repo:
pr.post_comment('%s@%s up to a' % (indent, botname), config['role_reviewer']['token'])
pr.post_comment(f'{indent}@hansen up to a', config['role_reviewer']['token'])
assert pr_id.limit_id == a

View File

@ -3,14 +3,14 @@ Test cases for updating PRs after the
initial merge has succeeded (and forward-porting has started)
"""
import re
import sys
import pytest
from utils import seen, re_matches, Commit, make_basic, to_pr
from utils import seen, matches, Commit, make_basic, to_pr
def test_update_pr(env, config, make_repo, users):
@pytest.mark.parametrize("merge_parent", [False, True])
def test_update_pr(env, config, make_repo, users, merge_parent) -> None:
""" Even for successful cherrypicks, it's possible that e.g. CI doesn't
pass or the reviewer finds out they need to update the code.
@ -18,6 +18,14 @@ def test_update_pr(env, config, make_repo, users):
only this one and its dependent should be updated?
"""
prod, _ = make_basic(env, config, make_repo)
# create a branch d from c so we can have 3 forward-port PRs, not just 2,
# for additional checks
env['runbot_merge.project'].search([]).write({
'branch_ids': [(0, 0, {'name': 'd', 'sequence': 40})]
})
with prod:
prod.make_commits('c', Commit('1111', tree={'i': 'a'}), ref='heads/d')
with prod:
[p_1] = prod.make_commits(
'a',
@ -25,11 +33,22 @@ def test_update_pr(env, config, make_repo, users):
ref='heads/hugechange'
)
pr = prod.make_pr(target='a', head='hugechange')
prod.post_status(p_1, 'success', 'legal/cla')
prod.post_status(p_1, 'success', 'ci/runbot')
pr.post_comment('hansen r+', config['role_reviewer']['token'])
prod.post_status(p_1, 'success', 'legal/cla')
prod.post_status(p_1, 'failure', 'ci/runbot')
env.run_crons()
assert pr.comments == [
(users['reviewer'], 'hansen r+'),
seen(env, pr, users),
(users['user'], "@{user} @{reviewer} 'ci/runbot' failed on this reviewed PR.".format_map(users)),
]
with prod:
prod.post_status(p_1, 'success', 'ci/runbot')
env.run_crons()
with prod:
prod.post_status('staging.a', 'success', 'legal/cla')
prod.post_status('staging.a', 'success', 'ci/runbot')
@ -40,7 +59,7 @@ def test_update_pr(env, config, make_repo, users):
pr0_id, pr1_id = env['runbot_merge.pull_requests'].search([], order='number')
fp_intermediate = (users['user'], '''\
This PR targets b and is part of the forward-port chain. Further PRs will be created up to c.
This PR targets b and is part of the forward-port chain. Further PRs will be created up to d.
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
''')
@ -100,15 +119,6 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
assert pr1_id.head == new_c != pr1_head, "the FP PR should be updated"
assert not pr1_id.parent_id, "the FP PR should be detached from the original"
assert pr1_remote.comments == [
seen(env, pr1_remote, users),
fp_intermediate, ci_warning, ci_warning,
(users['user'], "@%s @%s this PR was modified / updated and has become a normal PR. "
"It should be merged the normal way (via @%s)" % (
users['user'], users['reviewer'],
pr1_id.repository.project_id.github_prefix
)),
], "users should be warned that the PR has become non-FP"
# NOTE: should the followup PR wait for pr1 CI or not?
assert pr2_id.head != pr2_head
assert pr2_id.parent_id == pr1_id, "the followup PR should still be linked"
@ -125,6 +135,69 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
'x': '5'
}, "the followup FP should also have the update"
with prod:
prod.post_status(pr2_id.head, 'success', 'ci/runbot')
prod.post_status(pr2_id.head, 'success', 'legal/cla')
env.run_crons()
pr2 = prod.get_pr(pr2_id.number)
if merge_parent:
with prod:
pr2.post_comment('hansen r+', config['role_reviewer']['token'])
env.run_crons()
with prod:
prod.post_status('staging.c', 'success', 'ci/runbot')
prod.post_status('staging.c', 'success', 'legal/cla')
env.run_crons()
assert pr2_id.state == 'merged'
_0, _1, _2, pr3_id = env['runbot_merge.pull_requests'].search([], order='number')
assert pr3_id.parent_id == pr2_id
# don't bother updating heads (?)
pr3_id.write({'parent_id': False, 'detach_reason': "testing"})
# pump feedback messages
env.run_crons()
pr3 = prod.get_pr(pr3_id.number)
assert pr3.comments == [
seen(env, pr3, users),
(users['user'], f"""\
@{users['user']} @{users['reviewer']} this PR targets d and is the last of the forward-port chain containing:
* {pr2_id.display_name}
To merge the full chain, use
> @hansen r+
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
"""),
(users['user'], f"@{users['user']} @{users['reviewer']} this PR was "
f"modified / updated and has become a normal PR. It "
f"must be merged directly."
)
]
assert pr2.comments[:2] == [
seen(env, pr2, users),
(users['user'], """\
This PR targets c and is part of the forward-port chain. Further PRs will be created up to d.
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
"""),
]
if merge_parent:
assert pr2.comments[2:] == [
(users['reviewer'], "hansen r+"),
]
else:
assert pr2.comments[2:] == [
(users['user'], f"@{users['user']} @{users['reviewer']} child PR "
f"{pr3_id.display_name} was modified / updated and has "
f"become a normal PR. This PR (and any of its parents) "
f"will need to be merged independently as approvals "
f"won't cross."),
]
def test_update_merged(env, make_repo, config, users):
""" Strange things happen when an FP gets closed / merged but then its
parent is modified and the forwardport tries to update the (now merged)
@ -151,9 +224,7 @@ def test_update_merged(env, make_repo, config, users):
with prod:
prod.make_ref('heads/d', prod.commit('c').id)
env['runbot_merge.project'].search([]).write({
'branch_ids': [(0, 0, {
'name': 'd', 'sequence': 40, 'fp_target': True,
})]
'branch_ids': [(0, 0, {'name': 'd', 'sequence': 40})]
})
with prod:
@ -250,11 +321,12 @@ def test_duplicate_fw(env, make_repo, setreviewers, config, users):
'github_token': config['github']['token'],
'github_prefix': 'hansen',
'fp_github_token': config['github']['token'],
'fp_github_name': 'herbert',
'branch_ids': [
(0, 0, {'name': 'master', 'sequence': 0, 'fp_target': True}),
(0, 0, {'name': 'v3', 'sequence': 1, 'fp_target': True}),
(0, 0, {'name': 'v2', 'sequence': 2, 'fp_target': True}),
(0, 0, {'name': 'v1', 'sequence': 3, 'fp_target': True}),
(0, 0, {'name': 'master', 'sequence': 0}),
(0, 0, {'name': 'v3', 'sequence': 1}),
(0, 0, {'name': 'v2', 'sequence': 2}),
(0, 0, {'name': 'v1', 'sequence': 3}),
],
'repo_ids': [
(0, 0, {
@ -265,6 +337,7 @@ def test_duplicate_fw(env, make_repo, setreviewers, config, users):
]
})
setreviewers(*proj.repo_ids)
env['runbot_merge.events_sources'].create({'repository': repo.name})
# create a PR in v1, merge it, then create all 3 ports
with repo:
@ -304,7 +377,7 @@ def test_duplicate_fw(env, make_repo, setreviewers, config, users):
with repo:
repo.make_commits('v2', Commit('c0', tree={'z': 'b'}), ref=prv2.ref, make=False)
env.run_crons()
assert pr_ids.mapped('state') == ['merged', 'opened', 'validated', 'validated']
assert pr_ids.mapped('state') == ['merged', 'opened', 'opened', 'opened']
assert repo.read_tree(repo.commit(prv2_id.head)) == {'f': 'c', 'h': 'a', 'z': 'b'}
assert repo.read_tree(repo.commit(prv3_id.head)) == {'f': 'd', 'i': 'a', 'z': 'b'}
assert repo.read_tree(repo.commit(prmaster_id.head)) == {'f': 'e', 'z': 'b'}
@ -372,12 +445,12 @@ def test_subsequent_conflict(env, make_repo, config, users):
assert repo.read_tree(repo.commit(pr3_id.head)) == {
'f': 'c',
'g': 'a',
'h': re_matches(r'''<<<\x3c<<< HEAD
'h': matches('''<<<\x3c<<< $$
a
|||||||| parent of [\da-f]{7,}.*
||||||| $$
=======
conflict!
>>>\x3e>>> [\da-f]{7,}.*
>>>\x3e>>> $$
'''),
'x': '0',
}
@ -397,18 +470,13 @@ conflict!
# 1. link to status page
# 2. forward-port chain thing
assert repo.get_pr(pr3_id.number).comments[2:] == [
(users['user'], re_matches(f'''\
(users['user'], f'''\
@{users['user']} @{users['reviewer']} WARNING: the update of {pr2_id.display_name} to {pr2_id.head} has caused a \
conflict in this pull request, data may have been lost.
stdout:
```.*?
CONFLICT \\(add/add\\): Merge conflict in h.*?
```
stderr:
```
\\d{{2}}:\\d{{2}}:\\d{{2}}.\\d+ .* {pr2_id.head}
error: could not apply [0-9a-f]+\\.\\.\\. newfiles
''', re.DOTALL))
Auto-merging h
CONFLICT (add/add): Merge conflict in h
```'''),
]

View File

@ -1,85 +1,17 @@
# -*- coding: utf-8 -*-
from datetime import datetime
from datetime import datetime, timedelta
import pytest
from utils import seen, Commit, to_pr
from utils import seen, Commit, to_pr, make_basic
def make_basic(env, config, make_repo, *, fp_token, fp_remote):
""" Creates a basic repo with 3 forking branches
0 -- 1 -- 2 -- 3 -- 4 : a
|
`-- 11 -- 22 : b
|
`-- 111 : c
each branch just adds and modifies a file (resp. f, g and h) through the
contents sequence a b c d e
"""
Projects = env['runbot_merge.project']
project = Projects.search([('name', '=', 'myproject')])
if not project:
project = Projects.create({
'name': 'myproject',
'github_token': config['github']['token'],
'github_prefix': 'hansen',
'fp_github_token': fp_token and config['github']['token'],
'branch_ids': [
(0, 0, {'name': 'a', 'sequence': 2, 'fp_target': True}),
(0, 0, {'name': 'b', 'sequence': 1, 'fp_target': True}),
(0, 0, {'name': 'c', 'sequence': 0, 'fp_target': True}),
],
})
prod = make_repo('proj')
with prod:
a_0, a_1, a_2, a_3, a_4, = prod.make_commits(
None,
Commit("0", tree={'f': 'a'}),
Commit("1", tree={'f': 'b'}),
Commit("2", tree={'f': 'c'}),
Commit("3", tree={'f': 'd'}),
Commit("4", tree={'f': 'e'}),
ref='heads/a',
)
b_1, b_2 = prod.make_commits(
a_2,
Commit('11', tree={'g': 'a'}),
Commit('22', tree={'g': 'b'}),
ref='heads/b',
)
prod.make_commits(
b_1,
Commit('111', tree={'h': 'a'}),
ref='heads/c',
)
other = prod.fork()
repo = env['runbot_merge.repository'].create({
'project_id': project.id,
'name': prod.name,
'required_statuses': 'legal/cla,ci/runbot',
'fp_remote_target': fp_remote and other.name,
})
env['res.partner'].search([
('github_login', '=', config['role_reviewer']['user'])
]).write({
'review_rights': [(0, 0, {'repository_id': repo.id, 'review': True})]
})
env['res.partner'].search([
('github_login', '=', config['role_self_reviewer']['user'])
]).write({
'review_rights': [(0, 0, {'repository_id': repo.id, 'self_review': True})]
})
return project, prod, other
def test_no_token(env, config, make_repo):
""" if there's no token on the repo, nothing should break though should
log
"""
# create project configured with remotes on the repo but no token
proj, prod, _ = make_basic(env, config, make_repo, fp_token=False, fp_remote=True)
prod, _ = make_basic(env, config, make_repo, fp_token=False, fp_remote=True)
with prod:
prod.make_commits(
@ -109,8 +41,8 @@ def test_no_token(env, config, make_repo):
"should not have created forward port"
def test_remove_token(env, config, make_repo):
proj, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
proj.fp_github_token = False
prod, _ = make_basic(env, config, make_repo)
env['runbot_merge.project'].search([]).fp_github_token = False
with prod:
prod.make_commits(
@ -131,7 +63,7 @@ def test_remove_token(env, config, make_repo):
"should not have created forward port"
def test_no_target(env, config, make_repo):
proj, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=False)
prod, _ = make_basic(env, config, make_repo, fp_remote=False)
with prod:
prod.make_commits(
@ -152,7 +84,7 @@ def test_no_target(env, config, make_repo):
"should not have created forward port"
def test_failed_staging(env, config, make_repo):
proj, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
prod, _ = make_basic(env, config, make_repo)
reviewer = config['role_reviewer']['token']
with prod:
@ -179,7 +111,7 @@ def test_failed_staging(env, config, make_repo):
with prod:
prod.post_status(pr3_id.head, 'success', 'legal/cla')
prod.post_status(pr3_id.head, 'success', 'ci/runbot')
pr3.post_comment('%s r+' % proj.fp_github_name, reviewer)
pr3.post_comment('hansen r+', reviewer)
env.run_crons()
prod.commit('staging.c')
@ -190,19 +122,8 @@ def test_failed_staging(env, config, make_repo):
prod.post_status('staging.c', 'failure', 'ci/runbot')
env.run_crons()
pr3_head = env['runbot_merge.commit'].search([
('sha', '=', pr3_id.head),
])
assert len(pr3_head) == 1
assert not pr3_id.batch_id, "check that the PR indeed has no batch anymore"
assert not pr3_id.batch_ids.filtered(lambda b: b.active)
assert len(env['runbot_merge.batch'].search([
('prs', 'in', pr3_id.id),
'|', ('active', '=', True),
('active', '=', False),
])) == 2, "check that there do exist batches"
pr3_head = env['runbot_merge.commit'].search([('sha', '=', pr3_id.head)])
assert pr3_head
# send a new status to the PR, as if somebody had rebuilt it or something
with prod:
@ -212,6 +133,8 @@ def test_failed_staging(env, config, make_repo):
assert pr3_head.to_check, "check that the commit was updated as to process"
env.run_crons()
assert not pr3_head.to_check, "check that the commit was processed"
assert pr3_id.state == 'ready'
assert pr3_id.staging_id
class TestNotAllBranches:
""" Check that forward-ports don't behave completely insanely when not all
@ -262,10 +185,11 @@ class TestNotAllBranches:
'github_token': config['github']['token'],
'github_prefix': 'hansen',
'fp_github_token': config['github']['token'],
'fp_github_name': 'herbert',
'branch_ids': [
(0, 0, {'name': 'a', 'sequence': 2, 'fp_target': True}),
(0, 0, {'name': 'b', 'sequence': 1, 'fp_target': True}),
(0, 0, {'name': 'c', 'sequence': 0, 'fp_target': True}),
(0, 0, {'name': 'a', 'sequence': 2}),
(0, 0, {'name': 'b', 'sequence': 1}),
(0, 0, {'name': 'c', 'sequence': 0}),
]
})
repo_a = env['runbot_merge.repository'].create({
@ -282,6 +206,7 @@ class TestNotAllBranches:
'branch_filter': '[("name", "in", ["a", "c"])]',
})
setreviewers(repo_a, repo_b)
env['runbot_merge.events_sources'].create([{'repository': a.name}, {'repository': b.name}])
return project, a, a_dev, b, b_dev
def test_single_first(self, env, repos, config):
@ -314,7 +239,7 @@ class TestNotAllBranches:
with a:
a.post_status(pr2.head, 'success', 'ci/runbot')
a.get_pr(pr2.number).post_comment(
'%s r+' % project.fp_github_name,
'hansen r+',
config['role_reviewer']['token'])
env.run_crons()
assert pr1.staging_id
@ -353,7 +278,7 @@ class TestNotAllBranches:
with b:
b.post_status(pr1.head, 'success', 'ci/runbot')
b.get_pr(pr1.number).post_comment(
'%s r+' % project.fp_github_name,
'hansen r+',
config['role_reviewer']['token'])
env.run_crons()
with a, b:
@ -401,7 +326,7 @@ class TestNotAllBranches:
assert pr_a.comments == [
(users['reviewer'], 'hansen r+'),
seen(env, pr_a, users),
(users['user'], "@%s @%s this pull request can not be forward ported:"
(users['user'], "@%s @%s this pull request can not be forward-ported:"
" next branch is 'b' but linked pull request %s "
"has a next branch 'c'." % (
users['user'], users['reviewer'], pr_b_id.display_name,
@ -410,7 +335,7 @@ class TestNotAllBranches:
assert pr_b.comments == [
(users['reviewer'], 'hansen r+'),
seen(env, pr_b, users),
(users['user'], "@%s @%s this pull request can not be forward ported:"
(users['user'], "@%s @%s this pull request can not be forward-ported:"
" next branch is 'c' but linked pull request %s "
"has a next branch 'b'." % (
users['user'], users['reviewer'], pr_a_id.display_name,
@ -428,8 +353,9 @@ def test_new_intermediate_branch(env, config, make_repo):
def validate(repo, commit):
repo.post_status(commit, 'success', 'ci/runbot')
repo.post_status(commit, 'success', 'legal/cla')
project, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
_, prod2, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
prod, _ = make_basic(env, config, make_repo)
prod2, _ = make_basic(env, config, make_repo)
project = env['runbot_merge.project'].search([])
assert len(project.repo_ids) == 2
original_c_tree = prod.read_tree(prod.commit('c'))
@ -472,6 +398,7 @@ def test_new_intermediate_branch(env, config, make_repo):
with prod:
validate(prod, pr0_fp_id.head)
env.run_crons()
assert pr0_fp_id.state == 'validated'
original0 = PRs.search([('parent_id', '=', pr0_fp_id.id)])
assert original0, "Could not find FP of PR0 to C"
assert original0.target.name == 'c'
@ -514,11 +441,12 @@ def test_new_intermediate_branch(env, config, make_repo):
env.run_crons()
project.write({
'branch_ids': [
(0, False, {'name': 'new', 'sequence': 1, 'fp_target': True}),
(0, False, {'name': 'new', 'sequence': 1}),
]
})
env.run_crons()
assert pr0_fp_id.state == 'validated'
# created an intermediate PR for 0 and x
desc0 = PRs.search([('source_id', '=', pr0_id.id)])
new0 = desc0 - pr0_fp_id - original0
@ -574,7 +502,7 @@ def test_new_intermediate_branch(env, config, make_repo):
with prod, prod2:
for pr in fps.filtered(lambda p: p.target.name == 'c'):
get_repo(pr).get_pr(pr.number).post_comment(
'%s r+' % project.fp_github_name,
'hansen r+',
config['role_reviewer']['token'])
assert all(p.state == 'merged' for p in PRs.browse(sources)),\
"all sources should be merged"
@ -604,7 +532,7 @@ def test_new_intermediate_branch(env, config, make_repo):
}, "check that new got all the updates (should be in the same state as c really)"
def test_author_can_close_via_fwbot(env, config, make_repo):
project, prod, xxx = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
prod, _ = make_basic(env, config, make_repo)
other_user = config['role_other']
other_token = other_user['token']
other = prod.fork(token=other_token)
@ -621,7 +549,7 @@ def test_author_can_close_via_fwbot(env, config, make_repo):
pr.open(other_token)
prod.post_status(c, 'success', 'legal/cla')
prod.post_status(c, 'success', 'ci/runbot')
pr.post_comment('%s close' % project.fp_github_name, other_token)
pr.post_comment('hansen close', other_token)
pr.post_comment('hansen r+', config['role_reviewer']['token'])
env.run_crons()
assert pr.state == 'open'
@ -641,26 +569,26 @@ def test_author_can_close_via_fwbot(env, config, make_repo):
pr1.close(other_token)
# user can close via fwbot
with prod:
pr1.post_comment('%s close' % project.fp_github_name, other_token)
pr1.post_comment('hansen close', other_token)
env.run_crons()
assert pr1.state == 'closed'
assert pr1_id.state == 'closed'
def test_skip_ci_all(env, config, make_repo):
project, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
prod, _ = make_basic(env, config, make_repo)
with prod:
prod.make_commits('a', Commit('x', tree={'x': '0'}), ref='heads/change')
pr = prod.make_pr(target='a', head='change')
prod.post_status(pr.head, 'success', 'legal/cla')
prod.post_status(pr.head, 'success', 'ci/runbot')
pr.post_comment('%s skipci' % project.fp_github_name, config['role_reviewer']['token'])
pr.post_comment('hansen fw=skipci', config['role_reviewer']['token'])
pr.post_comment('hansen r+', config['role_reviewer']['token'])
env.run_crons()
assert env['runbot_merge.pull_requests'].search([
('repository.name', '=', prod.name),
('number', '=', pr.number)
]).fw_policy == 'skipci'
]).batch_id.fw_policy == 'skipci'
with prod:
prod.post_status('staging.a', 'success', 'legal/cla')
@ -679,7 +607,7 @@ def test_skip_ci_all(env, config, make_repo):
assert pr2_id.source_id == pr0_id
def test_skip_ci_next(env, config, make_repo):
project, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
prod, _ = make_basic(env, config, make_repo)
with prod:
prod.make_commits('a', Commit('x', tree={'x': '0'}), ref='heads/change')
@ -697,10 +625,10 @@ def test_skip_ci_next(env, config, make_repo):
pr0_id, pr1_id = env['runbot_merge.pull_requests'].search([], order='number')
with prod:
prod.get_pr(pr1_id.number).post_comment(
'%s skipci' % project.fp_github_name,
config['role_user']['token']
'hansen fw=skipci',
config['role_reviewer']['token']
)
assert pr0_id.fw_policy == 'skipci'
assert pr0_id.batch_id.fw_policy == 'skipci'
env.run_crons()
_, _, pr2_id = env['runbot_merge.pull_requests'].search([], order='number')
@ -717,7 +645,8 @@ def test_retarget_after_freeze(env, config, make_repo, users):
latter port. In that case the reinsertion task should just do nothing, and
the retargeted PR should be forward-ported normally once merged.
"""
project, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
prod, _ = make_basic(env, config, make_repo)
project = env['runbot_merge.project'].search([])
with prod:
[c] = prod.make_commits('b', Commit('thing', tree={'x': '1'}), ref='heads/mypr')
pr = prod.make_pr(target='b', head='mypr')
@ -748,7 +677,7 @@ def test_retarget_after_freeze(env, config, make_repo, users):
project.write({
'branch_ids': [
(1, branch_c.id, {'sequence': 1}),
(0, 0, {'name': 'bprime', 'sequence': 2, 'fp_target': True}),
(0, 0, {'name': 'bprime', 'sequence': 2}),
(1, branch_b.id, {'sequence': 3}),
(1, branch_a.id, {'sequence': 4}),
]
@ -766,7 +695,7 @@ def test_retarget_after_freeze(env, config, make_repo, users):
port_pr.base = 'bprime'
assert port_id.target == new_branch
env.run_crons('forwardport.port_forward')
env.run_crons(None)
assert not job.exists(), "job should have succeeded and apoptosed"
# since the PR was "already forward-ported" to the new branch it should not
@ -784,13 +713,16 @@ def test_retarget_after_freeze(env, config, make_repo, users):
prod.post_status('staging.bprime', 'success', 'legal/cla')
env.run_crons()
# #2 batch 6 (???)
assert port_id.state == 'merged'
new_pr_id = env['runbot_merge.pull_requests'].search([('state', 'not in', ('merged', 'closed'))])
assert len(new_pr_id) == 1
assert new_pr_id.parent_id == port_id
assert new_pr_id.target == branch_c
def test_approve_draft(env, config, make_repo, users):
_, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
prod, _ = make_basic(env, config, make_repo)
with prod:
prod.make_commits('a', Commit('x', tree={'x': '0'}), ref='heads/change')
@ -803,7 +735,7 @@ def test_approve_draft(env, config, make_repo, users):
assert pr.comments == [
(users['reviewer'], 'hansen r+'),
seen(env, pr, users),
(users['user'], f"I'm sorry, @{users['reviewer']}: draft PRs can not be approved."),
(users['user'], f"@{users['reviewer']} draft PRs can not be approved."),
]
with prod:
@ -818,8 +750,14 @@ def test_freeze(env, config, make_repo, users):
"""Freeze:
- should not forward-port the freeze PRs themselves
- unmerged forward ports need to be backfilled
- if the tip of the forward port is approved, the backfilled forward port
should also be
"""
project, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
prod, _ = make_basic(env, config, make_repo, statuses='default')
project = env['runbot_merge.project'].search([])
# branches here are "a" (older), "b", and "c" (master)
with prod:
[root, _] = prod.make_commits(
@ -829,6 +767,22 @@ def test_freeze(env, config, make_repo, users):
ref='heads/b'
)
prod.make_commits(root, Commit('other', tree={'f': '1'}), ref='heads/c')
# region PR which is forward ported but the FPs are not merged (they are approved)
with prod:
prod.make_commits("a", Commit("stuff", tree={'x': '0'}), ref="heads/abranch")
p = prod.make_pr(target='a', head='abranch')
p.post_comment("hansen r+ fw=skipci", config['role_reviewer']['token'])
prod.post_status('abranch', 'success')
env.run_crons()
with prod:
prod.post_status('staging.a', 'success')
env.run_crons()
pr_a_id, pr_b_id, pr_c_id = pr_ids = env['runbot_merge.pull_requests'].search([], order='number')
assert len(pr_ids) == 3, \
"should have created two forward ports, one in b and one in c (/ master)"
# endregion
with prod:
prod.make_commits(
'c',
@ -838,6 +792,15 @@ def test_freeze(env, config, make_repo, users):
release = prod.make_pr(target='c', head='release-1.1')
env.run_crons()
# approve pr_c_id but don't actually merge it before freezing
with prod:
prod.post_status(pr_b_id.head, 'success')
prod.post_status(pr_c_id.head, 'success')
prod.get_pr(pr_c_id.number).post_comment('hansen r+', config['role_reviewer']['token'])
# review comment should be handled eagerly
assert pr_b_id.reviewed_by
assert pr_c_id.reviewed_by
w = project.action_prepare_freeze()
assert w['res_model'] == 'runbot_merge.project.freeze'
w_id = env[w['res_model']].browse([w['res_id']])
@ -848,20 +811,46 @@ def test_freeze(env, config, make_repo, users):
assert not w_id.errors
w_id.action_freeze()
assert project.branch_ids.mapped('name') == ['c', 'post-b', 'b', 'a']
# re-enable forward-port cron after freeze
_, cron_id = env['ir.model.data'].check_object_reference('forwardport', 'port_forward', context={'active_test': False})
env['ir.cron'].browse([cron_id]).active = True
# run crons to process the feedback, run a second time in case of e.g.
# forward porting
env.run_crons()
env.run_crons()
env.run_crons('forwardport.port_forward')
assert release_id.state == 'merged'
assert not env['runbot_merge.pull_requests'].search([
('state', '!=', 'merged')
('source_id', '=', release_id.id),
]), "the release PRs should not be forward-ported"
assert env['runbot_merge.stagings'].search_count([]) == 2,\
"b and c forward ports should be staged since they were ready before freeze"
# an intermediate PR should have been created
pr_inserted = env['runbot_merge.pull_requests'].search([
('source_id', '=', pr_a_id.id),
('target.name', '=', 'post-b'),
])
assert pr_inserted, "an intermediate PR should have been reinserted in the sequence"
assert pr_c_id.parent_id == pr_inserted
assert pr_inserted.parent_id == pr_b_id
assert pr_inserted.reviewed_by == pr_c_id.reviewed_by,\
"review state should have been copied over from c (master)"
with prod:
prod.post_status(pr_inserted.head, 'success')
prod.post_status('staging.b', 'success')
prod.post_status('staging.c', 'success')
env.run_crons()
with prod:
prod.post_status('staging.post-b', 'success')
env.run_crons()
assert env['runbot_merge.pull_requests'].search_count([('state', '=', 'merged')]) \
== len(['release', 'initial', 'fw-b', 'fw-post-b', 'fw-c'])
@pytest.mark.expect_log_errors(reason="missing / invalid head causes an error to be logged")
def test_missing_magic_ref(env, config, make_repo):
"""There are cases where github fails to create / publish or fails to update
the magic refs in refs/pull/*.
@ -873,7 +862,7 @@ def test_missing_magic_ref(env, config, make_repo):
Emulate this behaviour by updating the PR with a commit which lives in the
repo but has no ref.
"""
_, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
prod, _ = make_basic(env, config, make_repo)
a_head = prod.commit('refs/heads/a')
with prod:
[c] = prod.make_commits(a_head.id, Commit('x', tree={'x': '0'}), ref='heads/change')
@ -903,7 +892,7 @@ def test_missing_magic_ref(env, config, make_repo):
# check that the batch is still here and targeted for the future
req = env['forwardport.batches'].search([])
assert len(req) == 1
assert req.retry_after > datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
assert req.retry_after > datetime.utcnow().isoformat(" ", "seconds")
# reset retry_after
req.retry_after = '1900-01-01 01:01:01'
@ -912,7 +901,7 @@ def test_missing_magic_ref(env, config, make_repo):
[c2] = prod.make_commits(a_head.id, Commit('y', tree={'x': '0'}))
assert c2 != c
pr_id.head = c2
env.run_crons()
env.run_crons(None)
fp_id = env['runbot_merge.pull_requests'].search([('source_id', '=', pr_id.id)])
assert fp_id
@ -920,3 +909,308 @@ def test_missing_magic_ref(env, config, make_repo):
# what they are (rather than e.g. diff the HEAD it branch with the target)
# as a result it doesn't forwardport our fake, we'd have to reset the PR's
# branch for that to happen
def test_disable_branch_with_batches(env, config, make_repo, users):
"""We want to avoid losing pull requests, so when deactivating a branch,
if there are *forward port* batches targeting that branch which have not
been forward-ported yet, port them over, as if their source had been merged
after the branch was disabled (thus skipped over)
"""
repo, fork = make_basic(env, config, make_repo, statuses="default")
proj = env['runbot_merge.project'].search([])
branch_b = env['runbot_merge.branch'].search([('name', '=', 'b')])
assert branch_b
# region repo2 creation & setup
repo2 = make_repo('proj2')
with repo2:
[a, b, c] = repo2.make_commits(
None,
Commit("a", tree={"f": "a"}),
Commit("b", tree={"g": "b"}),
Commit("c", tree={"h": "c"}),
)
repo2.make_ref("heads/a", a)
repo2.make_ref("heads/b", b)
repo2.make_ref("heads/c", c)
fork2 = repo2.fork()
repo2_id = env['runbot_merge.repository'].create({
"project_id": proj.id,
"name": repo2.name,
"required_statuses": "default",
"fp_remote_target": fork2.name,
})
env['runbot_merge.events_sources'].create({'repository': repo2.name})
env['res.partner'].search([
('github_login', '=', config['role_reviewer']['user'])
]).write({
'review_rights': [(0, 0, {'repository_id': repo2_id.id, 'review': True})]
})
env['res.partner'].search([
('github_login', '=', config['role_self_reviewer']['user'])
]).write({
'review_rights': [(0, 0, {'repository_id': repo2_id.id, 'self_review': True})]
})
# endregion
# region set up forward ported batches
with repo, fork, repo2, fork2:
fork.make_commits("a", Commit("x", tree={"x": "1"}), ref="heads/x")
pr1_a = repo.make_pr(title="X", target="a", head=f"{fork.owner}:x")
pr1_a.post_comment("hansen r+", config['role_reviewer']['token'])
repo.post_status(pr1_a.head, "success")
fork2.make_commits("a", Commit("x", tree={"x": "1"}), ref="heads/x")
pr2_a = repo2.make_pr(title="X", target="a", head=f"{fork2.owner}:x")
pr2_a.post_comment("hansen r+", config['role_reviewer']['token'])
repo2.post_status(pr2_a.head, "success")
fork.make_commits("a", Commit("y", tree={"y": "1"}), ref="heads/y")
pr3_a = repo.make_pr(title="Y", target="a", head=f"{fork.owner}:y")
pr3_a.post_comment("hansen r+", config['role_reviewer']['token'])
repo.post_status(pr3_a.head, 'success')
# remove just pr2 from the forward ports (maybe?)
pr2_a_id = to_pr(env, pr2_a)
pr2_a_id.limit_id = branch_b.id
env.run_crons()
assert pr2_a_id.limit_id == branch_b
# endregion
with repo, repo2:
repo.post_status('staging.a', 'success')
repo2.post_status('staging.a', 'success')
env.run_crons()
PullRequests = env['runbot_merge.pull_requests']
pr1_b_id = PullRequests.search([('parent_id', '=', to_pr(env, pr1_a).id)])
pr2_b_id = PullRequests.search([('parent_id', '=', pr2_a_id.id)])
pr3_b_id = PullRequests.search([('parent_id', '=', to_pr(env, pr3_a).id)])
assert pr1_b_id.parent_id
assert pr1_b_id.state == 'opened'
assert pr2_b_id.parent_id
assert pr2_b_id.state == 'opened'
assert pr3_b_id.parent_id
assert pr3_b_id.state == 'opened'
# detach pr3 (?)
pr3_b_id.write({'parent_id': False, 'detach_reason': 'because'})
b_id = proj.branch_ids.filtered(lambda b: b.name == 'b')
proj.write({
'branch_ids': [(1, b_id.id, {'active': False})]
})
env.run_crons()
assert not b_id.active
# pr1_a, pr1_b, pr1_c, pr2_a, pr2_b, pr3_a, pr3_b, pr3_c
assert PullRequests.search_count([]) == 8, "should have ported pr1 and pr3 but not pr2"
assert PullRequests.search_count([('parent_id', '=', pr1_b_id.id)])
assert PullRequests.search_count([('parent_id', '=', pr3_b_id.id)])
assert repo.get_pr(pr1_b_id.number).comments == [
seen(env, repo.get_pr(pr1_b_id.number), users),
(users['user'], "This PR targets b and is part of the forward-port chain. Further PRs will be created up to c.\n\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n"),
(users['user'], "@{user} @{reviewer} the target branch 'b' has been disabled, you may want to close this PR.\n\nAs this was not its limit, it will automatically be forward ported to the next active branch.".format_map(users)),
]
assert repo2.get_pr(pr2_b_id.number).comments == [
seen(env, repo2.get_pr(pr2_b_id.number), users),
(users['user'], """\
@{user} @{reviewer} this PR targets b and is the last of the forward-port chain.
To merge the full chain, use
> @hansen r+
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
""".format_map(users)),
(users['user'], "@{user} @{reviewer} the target branch 'b' has been disabled, you may want to close this PR.".format_map(users)),
]
def test_disable_multitudes(env, config, make_repo, users, setreviewers):
"""Ensure that deactivation ports can jump over other deactivated branches.
"""
# region setup
repo = make_repo("bob")
project = env['runbot_merge.project'].create({
"name": "bob",
"github_token": config['github']['token'],
"github_prefix": "hansen",
"fp_github_token": config['github']['token'],
"fp_github_name": "herbert",
"branch_ids": [
(0, 0, {'name': 'a', 'sequence': 90}),
(0, 0, {'name': 'b', 'sequence': 80}),
(0, 0, {'name': 'c', 'sequence': 70}),
(0, 0, {'name': 'd', 'sequence': 60}),
],
"repo_ids": [(0, 0, {
'name': repo.name,
'required_statuses': 'default',
'fp_remote_target': repo.name,
})],
})
setreviewers(project.repo_ids)
env['runbot_merge.events_sources'].create({'repository': repo.name})
with repo:
[a, b, c, d] = repo.make_commits(
None,
Commit("a", tree={"branch": "a"}),
Commit("b", tree={"branch": "b"}),
Commit("c", tree={"branch": "c"}),
Commit("d", tree={"branch": "d"}),
)
repo.make_ref("heads/a", a)
repo.make_ref("heads/b", b)
repo.make_ref("heads/c", c)
repo.make_ref("heads/d", d)
# endregion
with repo:
[a] = repo.make_commits("a", Commit("X", tree={"x": "1"}), ref="heads/x")
pra = repo.make_pr(target="a", head="x")
pra.post_comment("hansen r+", config['role_reviewer']['token'])
repo.post_status(a, "success")
env.run_crons()
with repo:
repo.post_status('staging.a', 'success')
env.run_crons()
pra_id = to_pr(env, pra)
assert pra_id.state == 'merged'
prb_id = env['runbot_merge.pull_requests'].search([('target.name', '=', 'b')])
assert prb_id.parent_id == pra_id
project.write({
'branch_ids': [
(1, b.id, {'active': False})
for b in env['runbot_merge.branch'].search([('name', 'in', ['b', 'c'])])
]
})
env.run_crons()
# should not have ported prb to the disabled branch c
assert not env['runbot_merge.pull_requests'].search([('target.name', '=', 'c')])
# should have ported prb to the active branch d
prd_id = env['runbot_merge.pull_requests'].search([('target.name', '=', 'd')])
assert prd_id
assert prd_id.parent_id == prb_id
prb = repo.get_pr(prb_id.number)
assert prb.comments == [
seen(env, prb, users),
(users['user'], 'This PR targets b and is part of the forward-port chain. Further PRs will be created up to d.\n\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n'),
(users['user'], """\
@{user} @{reviewer} the target branch 'b' has been disabled, you may want to close this PR.
As this was not its limit, it will automatically be forward ported to the next active branch.\
""".format_map(users)),
]
prd = repo.get_pr(prd_id.number)
assert prd.comments == [
seen(env, prd, users),
(users['user'], """\
@{user} @{reviewer} this PR targets d and is the last of the forward-port chain.
To merge the full chain, use
> @hansen r+
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
""".format_map(users))
]
FMT = '%Y-%m-%d %H:%M:%S'
FAKE_PREV_WEEK = (datetime.now() + timedelta(days=1)).strftime(FMT)
def test_reminder_detached(env, config, make_repo, users):
"""On detached forward ports, both sides of the detachment should be notified.
"""
# region setup
prod, _ = make_basic(env, config, make_repo, statuses='default')
with prod:
prod.make_commits('a', Commit('c', tree={'x': '0'}), ref="heads/abranch")
pr_a = prod.make_pr(target='a', head='abranch')
prod.post_status('abranch', 'success')
pr_a.post_comment('hansen r+ fw=skipci', config['role_reviewer']['token'])
env.run_crons()
with prod:
prod.post_status('staging.a', 'success')
env.run_crons()
pr_a_id = to_pr(env, pr_a)
pr_b_id = env['runbot_merge.pull_requests'].search([
('target.name', '=', 'b'),
('parent_id', '=', pr_a_id.id),
])
assert pr_b_id
with prod:
prod.post_status(pr_b_id.head, 'success')
env.run_crons()
pr_c_id = env['runbot_merge.pull_requests'].search([
('target.name', '=', 'c'),
('parent_id', '=', pr_b_id.id),
])
assert pr_c_id
# endregion
pr_b = prod.get_pr(pr_b_id.number)
pr_c = prod.get_pr(pr_c_id.number)
# region sanity check
env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
assert pr_b.comments == [
seen(env, pr_b, users),
(users['user'], """\
This PR targets b and is part of the forward-port chain. Further PRs will be created up to c.
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
""")], "the intermediate PR should not be reminded"
assert pr_c.comments == [
seen(env, pr_c, users),
(users['user'], """\
@%s @%s this PR targets c and is the last of the forward-port chain containing:
* %s
To merge the full chain, use
> @hansen r+
More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
""" % (
users['user'], users['reviewer'],
pr_b_id.display_name,
)),
(users['user'], "@%s @%s this forward port of %s is awaiting action (not merged or closed)." % (
users['user'],
users['reviewer'],
pr_a_id.display_name,
))
], "the final PR should be reminded"
# endregion
# region check detached
pr_c_id.write({'parent_id': False, 'detach_reason': 'because'})
env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
assert pr_b.comments[2:] == [
(users['user'], "@%s @%s child PR %s was modified / updated and has become a normal PR. This PR (and any of its parents) will need to be merged independently as approvals won't cross." % (
users['user'],
users['reviewer'],
pr_c_id.display_name,
)),
(users['user'], "@%s @%s this forward port of %s is awaiting action (not merged or closed)." % (
users['user'],
users['reviewer'],
pr_a_id.display_name,
))
], "the detached-from intermediate PR should now be reminded"
assert pr_c.comments[3:] == [
(users['user'], "@%(user)s @%(reviewer)s this PR was modified / updated and has become a normal PR. It must be merged directly." % users),
(users['user'], "@%s @%s this forward port of %s is awaiting action (not merged or closed)." % (
users['user'],
users['reviewer'],
pr_a_id.display_name,
))
], "the final forward port should be reminded as before"
# endregion

View File

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import itertools
import re
import time
from lxml import html
@ -41,29 +42,76 @@ def _simple_init(repo):
prx = repo.make_pr(title='title', body='body', target='master', head=c2)
return prx
class re_matches:
class matches(str):
# necessary so str.__new__ does not freak out on `flags`
def __new__(cls, pattern, flags=0):
return super().__new__(cls, pattern)
def __init__(self, pattern, flags=0):
self._r = re.compile(pattern, flags)
p, n = re.subn(
# `re.escape` will escape the `$`, so we need to handle that...
# maybe it should not be $?
r'\\\$(\w*?)\\\$',
lambda m: f'(?P<{m[1]}>.*?)' if m[1] else '(.*?)',
re.escape(self),
)
assert n, f"matches' pattern should have at least one placeholder, found none in\n{pattern}"
self._r = re.compile(p, flags | re.DOTALL)
def __eq__(self, text):
return self._r.match(text)
def __repr__(self):
return self._r.pattern + '...'
if not isinstance(text, str):
return NotImplemented
return self._r.search(text)
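# Usage sketch (illustrative only, not from the original file): `$$` compiles
# to an anonymous lazy wildcard and `$name$` to a named group; the rest of the
# pattern is matched literally, via re.search (unanchored) with re.DOTALL so
# wildcards may span lines. The pattern can sit on either side of `==` because
# `matches` subclasses str and overrides __eq__, making Python try the
# reflected comparison first.
assert "commit 4f2a8c1 merged" == matches("commit $$ merged")
assert matches("targets $b$ and is the last") == "this PR targets d and is the last of the chain"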
def seen(env, pr, users):
return users['user'], f'[Pull request status dashboard]({to_pr(env, pr).url}).'
url = to_pr(env, pr).url
return users['user'], f'[![Pull request status dashboard]({url}.png)]({url})'
def make_basic(env, config, make_repo, *, reponame='proj', project_name='myproject'):
""" Creates a basic repo with 3 forking branches
def make_basic(
env,
config,
make_repo,
*,
project_name='myproject',
reponame='proj',
statuses='legal/cla,ci/runbot',
fp_token=True,
fp_remote=True,
):
""" Creates a project ``project_name`` **if none exists**, otherwise
retrieves the existing one and adds a new repository and its fork.
Repositories are set up with three forking branches:
::
f = 0 -- 1 -- 2 -- 3 -- 4 : a
|
g = `-- 11 -- 22 : b
|
h = `-- 111 : c
f = 0 -- 1 -- 2 -- 3 -- 4 : a
|
g = `-- 11 -- 22 : b
|
h = `-- 111 : c
each branch just adds and modifies a file (resp. f, g and h) through the
contents sequence a b c d e
:param env: Environment, for odoo model interactions
:param config: pytest project config thingie
:param make_repo: repo maker function, normally the fixture, should be a
``Callable[[str], Repo]``
:param project_name: internal project name, can be used to recover the
project object afterward, matches exactly since it's
unique per odoo db (and thus test)
:param reponame: the base name of the repository, for identification; for
concurrency reasons the actual repository name *will* be
different
:param statuses: required statuses for the repository; stupidly defaults to
the old Odoo statuses, should be moved to ``default`` over
time for simplicity (unless the test specifically calls for
multiple statuses)
:param fp_token: whether to set the ``fp_github_token`` on the project if
/ when creating it
:param fp_remote: whether to create a fork repo and set it as the
repository's ``fp_remote_target``
"""
Projects = env['runbot_merge.project']
project = Projects.search([('name', '=', project_name)])
@ -72,15 +120,17 @@ def make_basic(env, config, make_repo, *, reponame='proj', project_name='myproje
'name': project_name,
'github_token': config['github']['token'],
'github_prefix': 'hansen',
'fp_github_token': config['github']['token'],
'fp_github_token': fp_token and config['github']['token'],
'fp_github_name': 'herbert',
'branch_ids': [
(0, 0, {'name': 'a', 'sequence': 100, 'fp_target': True}),
(0, 0, {'name': 'b', 'sequence': 80, 'fp_target': True}),
(0, 0, {'name': 'c', 'sequence': 60, 'fp_target': True}),
(0, 0, {'name': 'a', 'sequence': 100}),
(0, 0, {'name': 'b', 'sequence': 80}),
(0, 0, {'name': 'c', 'sequence': 60}),
],
})
prod = make_repo(reponame)
env['runbot_merge.events_sources'].create({'repository': prod.name})
with prod:
a_0, a_1, a_2, a_3, a_4, = prod.make_commits(
None,
@ -102,12 +152,13 @@ def make_basic(env, config, make_repo, *, reponame='proj', project_name='myproje
Commit('111', tree={'h': 'a'}),
ref='heads/c',
)
other = prod.fork()
other = prod.fork() if fp_remote else None
repo = env['runbot_merge.repository'].create({
'project_id': project.id,
'name': prod.name,
'required_statuses': 'legal/cla,ci/runbot',
'fp_remote_target': other.name,
'required_statuses': statuses,
'fp_remote_target': other.name if other else False,
'group_id': False,
})
env['res.partner'].search([
('github_login', '=', config['role_reviewer']['user'])
@ -126,14 +177,26 @@ def pr_page(page, pr):
return html.fromstring(page(f'/{pr.repo.name}/pull/{pr.number}'))
def to_pr(env, pr):
pr = env['runbot_merge.pull_requests'].search([
('repository.name', '=', pr.repo.name),
('number', '=', pr.number),
])
assert len(pr) == 1, f"Expected to find {pr.repo.name}#{pr.number}, got {pr}."
return pr
for _ in range(5):
pr_id = env['runbot_merge.pull_requests'].search([
('repository.name', '=', pr.repo.name),
('number', '=', pr.number),
])
if pr_id:
assert len(pr_id) == 1, f"Expected to find {pr.repo.name}#{pr.number}, got {pr_id}."
return pr_id
time.sleep(1)
raise TimeoutError(f"Unable to find {pr.repo.name}#{pr.number}")
def part_of(label, pr_id, *, separator='\n\n'):
""" Adds the "part-of" pseudo-header in the footer.
"""
return f'{label}{separator}Part-of: {pr_id.display_name}'
return f"""\
{label}{separator}\
Part-of: {pr_id.display_name}
Signed-off-by: {pr_id.reviewed_by.formatted_email}"""
def ensure_one(records):
assert len(records) == 1
return records

View File

@ -1,46 +1,10 @@
import logging
from os import environ
import sentry_sdk
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
from odoo import http
from . import models, controllers
from .sentry import enable_sentry
def delegate(self, attr):
return getattr(self.app, attr)
SentryWsgiMiddleware.__getattr__ = delegate
def enable_sentry():
logger = logging.getLogger('runbot_merge')
dsn = environ.get('SENTRY_DSN')
if not dsn:
logger.info("No DSN found, skipping sentry...")
return
try:
sentry_sdk.init(
dsn,
integrations=[
# note: if the colorformatter is enabled, sentry gets lost
# and classifies everything as errors because it fails to
# properly classify levels as the colorformatter injects
# the ANSI color codes right into LogRecord.levelname
LoggingIntegration(level=logging.INFO, event_level=logging.WARNING),
]
)
http.root = SentryWsgiMiddleware(http.root)
except Exception:
logger.exception("DSN found, failed to enable sentry...")
else:
logger.info("DSN found, sentry enabled...")
def _check_citext(cr):
cr.execute("select 1 from pg_extension where extname = 'citext'")
if not cr.rowcount:
def _check_citext(env):
env.cr.execute("select 1 from pg_extension where extname = 'citext'")
if not env.cr.rowcount:
try:
cr.execute('create extension citext')
env.cr.execute('create extension citext')
except Exception:
raise AssertionError("runbot_merge needs the citext extension")

View File

@ -1,14 +1,18 @@
{
'name': 'merge bot',
'version': '1.7',
'depends': ['contacts', 'website'],
'version': '1.15',
'depends': ['contacts', 'mail', 'website'],
'data': [
'security/security.xml',
'security/ir.model.access.csv',
'data/merge_cron.xml',
'models/crons/git_maintenance.xml',
'models/crons/cleanup_scratch_branches.xml',
'data/runbot_merge.pull_requests.feedback.template.csv',
'views/res_partner.xml',
'views/runbot_merge_project.xml',
'views/batch.xml',
'views/mergebot.xml',
'views/queues.xml',
'views/configuration.xml',

View File

@ -0,0 +1,6 @@
IMP: optimize home page
An unnecessary deopt and a few opportunities were found and fixed in the home
page / main dashboard; a few improvements have been implemented which should
significantly lower the number of SQL queries and the time needed to generate
the page.

View File

@ -0,0 +1,6 @@
ADD: stagings reverse index (from commits)
Finding out the commits from a staging is not great, but it's easy enough; the
reverse was difficult and very inefficient. Splat out the "heads" JSON field
into two join tables, and provide both ORM methods and a JSON endpoint to
lookup stagings based on their commits.
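A hedged sketch of querying the new endpoint (host and SHA are made up; the
route and its `commits`/`heads` kwargs are those of the controller, and Odoo
`type='json'` routes expect a JSON-RPC 2.0 envelope):

    import requests

    # ids of the stagings containing the given staged commit(s)
    resp = requests.post(
        "https://mergebot.example.com/runbot_merge/stagings",  # hypothetical host
        json={
            "jsonrpc": "2.0",
            "method": "call",
            "id": 1,
            "params": {"commits": ["1b6453892473a467d07372d45eb05abc2031647a"]},
        },
    )
    staging_ids = resp.json()["result"]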

View File

@ -0,0 +1,5 @@
IMP: added quick jump from staging to PR in the backend
In the backend, going through the batches to reach a PR is really not
convenient; directly displaying both the github URL and the frontend URL for each PR
makes jumping around much easier.

View File

@ -0,0 +1,8 @@
IMP: allow setting forward-port limits after the source pull request has been merged
Should now be possible to both extend and retract the forward port limit
afterwards, though obviously no shorter than the current tip of the forward
port sequence. One limitation is that forward ports being created can't be
stopped so there might be some windows where trying to set the limit to the
current tip will fail (because it's in the process of being forward-ported to
the next branch).
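
For instance (branch name hypothetical), commenting on the source PR or on one
of its forward ports after the merge:

> @robodoo up to v3

moves the limit so the port sequence stops at v3, provided v3 is not below the
current tip of the sequence.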

View File

@ -0,0 +1,57 @@
CHG: complete rework of the commands system
# fun is dead: strict commands parsing
Historically the bots would apply whatever looked like a command and ignore the
rest. This led to people sending novels to the bot, then being surprised the bot
found a command in the mess.
The bots now ignore all lines which contain any non-command. Example:
> @robodoo r+ when green darling
Previously, the bot would apply the `r+` and ignore the rest. Now the bot will
ignore everything and reply with
> unknown command "when"
# fwbot is dead
The mergebot (@robodoo) is now responsible for the old fwbot commands:
- close, ignore, up to, ... work as they ever did, just with robodoo
- `robodoo r+` now approves the parents if the current PR is a forward port
- a specific PR can be approved even in forward ports by providing its number
e.g. `robodoo r=45328` will approve just PR 45328, if that is the PR the
comment is being posted on or one of its parents
- the approval of forward ports won't skip over un-approvable PRs anymore
- the rights of the original author have been restricted slightly: they can
only approve the direct descendants of merged PRs, so if one of the parents
has been modified and is not merged yet, the original author can't approve,
nor can they approve the modified PR, or a conflicting PR which has to get
fixed (?)
# no more p=<number>
The old priorities command was a tangle of multiple concerns, not all of which
were always desired or applicable. These tangles have been split along their
various axis.
# listing
The new commands are (a combined example follows the list):
- `default`, sets the staging priority back to the default
- `priority`, sets the staging priority to elevated, on staging these PRs are
staged first, then the `normal` PRs are added
- `alone`, sets the staging priority to high, these PRs are staged before
considering splits, and only `alone` PRs are staged together even if the batch
is not full
- `fw=default`, processes forward ports normally
- `fw=skipci`, once the current PR has been merged, creates all the forward ports
without waiting for each to have valid statuses
- `fw=skipmerge`, immediately create all forward ports even if the base pull
request has not even been merged yet
- `skipchecks`, makes the entire batch (target PR and any linked PR) immediately
ready, bypassing statuses and reviews
- `cancel`, cancels the staging on the target branch, if any
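
A hedged illustration combining the strict parsing with the new commands:

> @robodoo fw=skipci alone r+

is applied in full, since every token is a command, while

> @robodoo alone please

is rejected wholesale with an `unknown command "please"` reply, because
commands can no longer be mixed with prose.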

View File

@ -0,0 +1,4 @@
ADD: projects now know how to prioritise new PRs over splits
While this likely has relatively low utility, we'll look at how it performs
during periods of high throughput.

View File

@ -0,0 +1,14 @@
ADD: stagings can now be disabled on a per-project basis
Currently stopping stagings requires stopping the staging cron(s), which causes
several issues:
- the staging cron runs very often, so it can be difficult to find a window to
deactivate it (as the cron runner acquires an exclusive lock on the cron)
- the staging cron is global, so it does not disable staging only on the
problematic project (to say nothing of branch) but on all of them
The latter is not currently a huge issue as only one of the mergebot-tracked
projects is ultra active (spreadsheet activity is on the order of a few
single-PR stagings a day), but the former is really annoying when trying to
stop runaway broken stagings.
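
A minimal sketch of what flipping the switch could look like from an Odoo
shell, assuming it is exposed as a boolean field on the project (the field
name `staging_enabled` is hypothetical):

    # disable stagings on the problematic project only, leaving the
    # staging cron itself running
    project = env['runbot_merge.project'].search([('name', '=', 'myproject')])
    project.staging_enabled = False  # hypothetical field name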

View File

@ -0,0 +1,10 @@
IMP: PR descriptions are now markdown-rendered in the dashboard
Previously the raw text was displayed. The main advantage of rendering, aside
from not splatting huge links in the middle of the thing, is that we can
autolink *odoo tasks* if they're of a pattern we recognize. Some support has
also been added for github's references to mirror GFM rendering.
This would be a lot less useful (and in fact pretty much useless) if we could
use github's built-in [references to external resources](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/managing-repository-settings/configuring-autolinks-to-reference-external-resources);
sadly, that seems not to be available on our plan.

View File

@ -3,6 +3,7 @@ import hmac
import logging
import json
import sentry_sdk
import werkzeug.exceptions
from odoo.http import Controller, request, route
@ -14,44 +15,126 @@ from .. import utils, github
_logger = logging.getLogger(__name__)
class MergebotController(Controller):
@route('/runbot_merge/stagings', auth='none', type='json')
def stagings_for_commits(self, commits=None, heads=None):
Stagings = request.env(user=1)['runbot_merge.stagings'].sudo()
if commits:
stagings = Stagings.for_commits(*commits)
elif heads:
stagings = Stagings.for_heads(*heads)
else:
raise ValueError('Must receive one of "commits" or "heads" kwarg')
return stagings.ids
@route('/runbot_merge/stagings/<int:staging>', auth='none', type='json')
def prs_for_staging(self, staging):
staging = request.env(user=1)['runbot_merge.stagings'].browse(staging)
return [
batch.prs.mapped(lambda p: {
'name': p.display_name,
'repository': p.repository.name,
'number': p.number,
})
for batch in staging.sudo().batch_ids
]
@route('/runbot_merge/stagings/<int:from_staging>/<int:to_staging>', auth='none', type='json')
def prs_for_stagings(self, from_staging, to_staging, include_from=True, include_to=True):
Stagings = request.env(user=1, context={"active_test": False})['runbot_merge.stagings']
from_staging = Stagings.browse(from_staging)
to_staging = Stagings.browse(to_staging)
if from_staging.target != to_staging.target:
raise ValueError(f"Stagings must have the same target branch, found {from_staging.target.name} and {to_staging.target.name}")
if from_staging.id >= to_staging.id:
raise ValueError("first staging must be older than second staging")
stagings = Stagings.search([
('target', '=', to_staging.target.id),
('state', '=', 'success'),
('id', '>=' if include_from else '>', from_staging.id),
('id', '<=' if include_to else '<', to_staging.id),
], order="id asc")
return [
{
'staging': staging.id,
'prs': [
batch.prs.mapped(lambda p: {
'name': p.display_name,
'repository': p.repository.name,
'number': p.number,
})
for batch in staging.batch_ids
]
}
for staging in stagings
]
@route('/runbot_merge/hooks', auth='none', type='json', csrf=False, methods=['POST'])
def index(self):
req = request.httprequest
event = req.headers['X-Github-Event']
with sentry_sdk.configure_scope() as scope:
if scope.transaction:
# only in 1.8.0 (or at least 1.7.2)
if hasattr(scope, 'set_transaction_name'):
scope.set_transaction_name(f"webhook {event}")
else: # but our servers use 1.4.3
scope.transaction = f"webhook {event}"
github._gh.info(self._format(req))
data = request.get_json_data()
repo = data.get('repository', {}).get('full_name')
env = request.env(user=1)
source = repo and env['runbot_merge.events_sources'].search([('repository', '=', repo)])
if not source:
_logger.warning(
"Ignored hook %s to unknown source repository %s",
req.headers.get("X-Github-Delivery"),
repo,
)
return werkzeug.exceptions.Forbidden()
elif secret := source.secret:
signature = 'sha256=' + hmac.new(secret.strip().encode(), req.get_data(), hashlib.sha256).hexdigest()
if not hmac.compare_digest(signature, req.headers.get('X-Hub-Signature-256', '')):
_logger.warning(
"Ignored hook %s with incorrect signature on %s: got %s expected %s, in:\n%s",
req.headers.get('X-Github-Delivery'),
repo,
req.headers.get('X-Hub-Signature-256'),
signature,
req.headers,
)
return werkzeug.exceptions.Forbidden()
elif req.headers.get('X-Hub-Signature-256'):
_logger.info("No secret for %s but received a signature in:\n%s", repo, req.headers)
else:
_logger.info("No secret or signature for %s", repo)
c = EVENTS.get(event)
if not c:
_logger.warning('Unknown event %s', event)
return 'Unknown event {}'.format(event)
repo = request.jsonrequest['repository']['full_name']
env = request.env(user=1)
secret = env['runbot_merge.repository'].search([
('name', '=', repo),
]).project_id.secret
if secret:
signature = 'sha1=' + hmac.new(secret.encode('ascii'), req.get_data(), hashlib.sha1).hexdigest()
if not hmac.compare_digest(signature, req.headers.get('X-Hub-Signature', '')):
_logger.warning("Ignored hook with incorrect signature %s",
req.headers.get('X-Hub-Signature'))
return werkzeug.exceptions.Forbidden()
return c(env, request.jsonrequest)
sentry_sdk.set_context('webhook', data)
return c(env, data)
def _format(self, request):
return """<= {r.method} {r.full_path}
return """{r.method} {r.full_path}
{headers}
{body}
vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\
""".format(
r=request,
headers='\n'.join(
'\t%s: %s' % entry for entry in request.headers.items()
),
body=utils.shorten(request.get_data(as_text=True).strip(), 400)
body=request.get_data(as_text=True),
)
def handle_pr(env, event):
@ -99,7 +182,7 @@ def handle_pr(env, event):
return env['runbot_merge.pull_requests'].search([
('repository', '=', repo.id),
('number', '=', pr['number']),
('target', '=', target.id),
# ('target', '=', target.id),
])
# edition difficulty: pr['base']['ref] is the *new* target, the old one
# is at event['change']['base']['ref'] (if the target changed), so edition
@ -143,18 +226,26 @@ def handle_pr(env, event):
message = None
if not branch:
message = f"This PR targets the un-managed branch {r}:{b}, it needs to be retargeted before it can be merged."
message = env.ref('runbot_merge.handle.branch.unmanaged')._format(
repository=r,
branch=b,
event=event,
)
_logger.info("Ignoring event %s on PR %s#%d for un-managed branch %s",
event['action'], r, pr['number'], b)
elif not branch.active:
message = f"This PR targets the disabled branch {r}:{b}, it needs to be retargeted before it can be merged."
message = env.ref('runbot_merge.handle.branch.inactive')._format(
repository=r,
branch=b,
event=event,
)
if message and event['action'] not in ('synchronize', 'closed'):
feedback(message=message)
if not branch:
return "Not set up to care about {}:{}".format(r, b)
headers = request.httprequest.headers if request.httprequest else {}
headers = request.httprequest.headers if request else {}
_logger.info(
"%s: %s#%s (%s) (by %s, delivery %s by %s)",
event['action'],
@ -164,6 +255,11 @@ def handle_pr(env, event):
headers.get('X-Github-Delivery'),
headers.get('User-Agent'),
)
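# attribute the tracking messages below to the webhook sender, creating
# their partner on the fly if we've never seen them before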
sender = env['res.partner'].search([('github_login', '=', event['sender']['login'])], limit=1)
if not sender:
sender = env['res.partner'].create({'name': event['sender']['login'], 'github_login': event['sender']['login']})
env['runbot_merge.pull_requests']._track_set_author(sender, fallback=True)
if event['action'] == 'opened':
author_name = pr['user']['login']
author = env['res.partner'].search([('github_login', '=', author_name)], limit=1)
@ -172,7 +268,7 @@ def handle_pr(env, event):
pr_obj = env['runbot_merge.pull_requests']._from_gh(pr)
return "Tracking PR as {}".format(pr_obj.id)
pr_obj = env['runbot_merge.pull_requests']._get_or_schedule(r, pr['number'])
pr_obj = env['runbot_merge.pull_requests']._get_or_schedule(r, pr['number'], closing=event['action'] == 'closed')
if not pr_obj:
_logger.info("webhook %s on unknown PR %s#%s, scheduled fetch", event['action'], repo.name, pr['number'])
return "Unknown PR {}:{}, scheduling fetch".format(repo.name, pr['number'])
@ -203,7 +299,8 @@ def handle_pr(env, event):
)
pr_obj.write({
'state': 'opened',
'reviewed_by': False,
'error': False,
'head': pr['head']['sha'],
'squash': pr['commits'] == 1,
})
@ -227,26 +324,25 @@ def handle_pr(env, event):
oldstate,
)
return 'Closed {}'.format(pr_obj.display_name)
else:
_logger.warning(
'%s tried to close %s (state=%s)',
event['sender']['login'],
pr_obj.display_name,
oldstate,
)
return 'Ignored: could not lock rows (probably being merged)'
_logger.info(
'%s tried to close %s (state=%s) but locking failed',
event['sender']['login'],
pr_obj.display_name,
oldstate,
)
return 'Ignored: could not lock rows (probably being merged)'
if event['action'] == 'reopened':
if pr_obj.state == 'merged':
feedback(
close=True,
message="@%s ya silly goose you can't reopen a merged PR." % event['sender']['login']
message=env.ref('runbot_merge.handle.pr.merged')._format(event=event),
)
if pr_obj.state == 'closed':
elif pr_obj.closed:
_logger.info('%s reopening %s', event['sender']['login'], pr_obj.display_name)
pr_obj.write({
'state': 'opened',
'closed': False,
# updating the head triggers a revalidation
'head': pr['head']['sha'],
'squash': pr['commits'] == 1,
@ -279,6 +375,7 @@ def handle_status(env, event):
statuses = c.statuses::jsonb || EXCLUDED.statuses::jsonb
WHERE NOT c.statuses::jsonb @> EXCLUDED.statuses::jsonb
""", [event['sha'], status_value])
env.ref("runbot_merge.process_updated_commits")._trigger()
return 'ok'
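The SQL fragment above is only the conflict-resolution tail of an upsert. A hedged reconstruction of the full statement (table alias and columns assumed from the fragment, not verbatim from the commit) makes the intent clearer: incoming statuses are merged into the stored jsonb unless it already contains them, so replayed deliveries are no-ops:
# assumed reconstruction for illustration, not the verbatim query
UPSERT_STATUSES = """
    INSERT INTO runbot_merge_commit AS c (sha, statuses)
    VALUES (%s, %s)
    ON CONFLICT (sha) DO UPDATE
        SET statuses = c.statuses::jsonb || EXCLUDED.statuses::jsonb
        WHERE NOT c.statuses::jsonb @> EXCLUDED.statuses::jsonb
"""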
@ -290,6 +387,10 @@ def handle_comment(env, event):
issue = event['issue']['number']
author = event['comment']['user']['login']
comment = event['comment']['body']
if len(comment) > 5000:
_logger.warning('comment(%s): %s %s#%s => ignored (%d characters)', event['comment']['html_url'], author, repo, issue, len(comment))
return "ignored: too big"
_logger.info('comment[%s]: %s %s#%s %r', event['action'], author, repo, issue, comment)
if event['action'] != 'created':
return "Ignored: action (%r) is not 'created'" % event['action']
@ -301,6 +402,9 @@ def handle_review(env, event):
pr = event['pull_request']['number']
author = event['review']['user']['login']
comment = event['review']['body'] or ''
if len(comment) > 5000:
_logger.warning('comment(%s): %s %s#%s => ignored (%d characters)', event['review']['html_url'], author, repo, pr, len(comment))
return "ignored: too big"
_logger.info('review[%s]: %s %s#%s %r', event['action'], author, repo, pr, comment)
if event['action'] != 'submitted':
@ -311,7 +415,7 @@ def handle_review(env, event):
target=event['pull_request']['base']['ref'])
def handle_ping(env, event):
print("Got ping! {}".format(event['zen']))
_logger.info("Got ping! %s", event['zen'])
return "pong"
EVENTS = {

View File

@ -1,36 +1,80 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
import base64
import collections
import colorsys
import hashlib
import io
import json
import logging
import pathlib
from dataclasses import dataclass
from email.utils import formatdate
from enum import Flag, auto
from functools import cached_property
from itertools import chain, product
from math import ceil
from typing import Tuple, cast, Mapping, Optional, List
import markdown
import markupsafe
import werkzeug.exceptions
import werkzeug.wrappers
from PIL import Image, ImageDraw, ImageFont
from odoo.http import Controller, route, request
from odoo.tools import file_open
HORIZONTAL_PADDING = 20
VERTICAL_PADDING = 5
_logger = logging.getLogger(__name__)
LIMIT = 20
class MergebotDashboard(Controller):
@route('/runbot_merge', auth="public", type="http", website=True)
@route('/runbot_merge', auth="public", type="http", website=True, sitemap=True)
def dashboard(self):
projects = request.env['runbot_merge.project'].with_context(active_test=False).sudo().search([])
stagings = {
branch: projects.env['runbot_merge.stagings'].search([
('target', '=', branch.id)], order='staged_at desc', limit=6)
for project in projects
for branch in project.branch_ids
if branch.active
}
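# share a single prefetch set across all staging recordsets so rendering
# the dashboard fetches their fields in one batched read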
prefetch_set = list({
id
for stagings in stagings.values()
for id in stagings.ids
})
for st in stagings.values():
st._prefetch_ids = prefetch_set
return request.render('runbot_merge.dashboard', {
'projects': request.env['runbot_merge.project'].with_context(active_test=False).sudo().search([]),
'projects': projects,
'stagings_map': stagings,
})
@route('/runbot_merge/<int:branch_id>', auth='public', type='http', website=True)
def stagings(self, branch_id, until=None):
@route('/runbot_merge/<int:branch_id>', auth='public', type='http', website=True, sitemap=False)
def stagings(self, branch_id, until=None, state=''):
branch = request.env['runbot_merge.branch'].browse(branch_id).sudo().exists()
if not branch:
raise werkzeug.exceptions.NotFound()
stagings = request.env['runbot_merge.stagings'].with_context(active_test=False).sudo().search([
('target', '=', branch.id),
('staged_at', '<=', until) if until else (True, '=', True),
], order='staged_at desc', limit=LIMIT+1)
staging_domain = [('target', '=', branch.id)]
if until:
staging_domain.append(('staged_at', '<=', until))
if state:
staging_domain.append(('state', '=', state))
stagings = request.env['runbot_merge.stagings'].with_context(active_test=False).sudo().search(staging_domain, order='staged_at desc', limit=LIMIT + 1)
return request.render('runbot_merge.branch_stagings', {
'branch': branch,
'stagings': stagings[:LIMIT],
'until': until,
'state': state,
'next': stagings[-1].staged_at if len(stagings) > LIMIT else None,
})
@ -49,7 +93,7 @@ class MergebotDashboard(Controller):
entries.setdefault(key, []).extend(map(item_converter, items))
return entries
@route('/runbot_merge/changelog', auth='public', type='http', website=True)
@route('/runbot_merge/changelog', auth='public', type='http', website=True, sitemap=True)
def changelog(self):
md = markdown.Markdown(extensions=['nl2br'], output_format='html5')
entries = self.entries(lambda t: markupsafe.Markup(md.convert(t)))
@ -57,8 +101,8 @@ class MergebotDashboard(Controller):
'entries': entries,
})
@route('/<org>/<repo>/pull/<int(min=1):pr>', auth='public', type='http', website=True)
def pr(self, org, repo, pr):
@route('/<org>/<repo>/pull/<int(min=1):pr><any("", ".png"):png>', auth='public', type='http', website=True, sitemap=False)
def pr(self, org, repo, pr, png):
pr_id = request.env['runbot_merge.pull_requests'].sudo().search([
('repository.name', '=', f'{org}/{repo}'),
('number', '=', int(pr)),
@ -66,8 +110,17 @@ class MergebotDashboard(Controller):
if not pr_id:
raise werkzeug.exceptions.NotFound()
if not pr_id.repository.group_id <= request.env.user.groups_id:
_logger.warning(
"Access error: %s (%s) tried to access %s but lacks access",
request.env.user.login,
request.env.user.name,
pr_id.display_name,
)
raise werkzeug.exceptions.NotFound()
if png:
return raster_render(pr_id)
st = {}
if pr_id.statuses:
# normalise `statuses` to map to a dict
@ -80,3 +133,425 @@ class MergebotDashboard(Controller):
'merged_head': json.loads(pr_id.commits_map).get(''),
'statuses': st
})
def raster_render(pr):
default_headers = {
'Content-Type': 'image/png',
'Last-Modified': formatdate(),
# - anyone can cache the image, so public
# - crons run about every minute so that's how long a request is fresh
# - if the mergebot can't be contacted, allow using the stale response (no must-revalidate)
# - intermediate caches can recompress the PNG if they want (pillow is not a very good PNG generator)
# - the response is mutable even during freshness, technically (as there
# is no guarantee the freshness window lines up with the cron, plus
# some events are not cron-based)
# - maybe don't allow serving the stale image *while* revalidating?
# - allow serving a stale image for a day if the server returns 500
'Cache-Control': 'public, max-age=60, stale-if-error=86400',
}
if if_none_match := request.httprequest.headers.get('If-None-Match'):
# just copy the existing value out if we received any
default_headers['ETag'] = if_none_match
# weak validation: check the latest modification date of all objects involved
project, repos, branches, genealogy = pr.env.ref('runbot_merge.dashboard-pre')\
._run_action_code_multi({'pr': pr})
# last-modified should be in RFC2822 format, which is what
# email.utils.formatdate does (sadly takes a timestamp but...)
last_modified = formatdate(max((
o.write_date
for o in chain(
project,
repos,
branches,
genealogy,
genealogy.all_prs | pr,
)
)).timestamp())
# The (304) response must not contain a body and must include the headers
# that would have been sent in an equivalent 200 OK response
headers = {**default_headers, 'Last-Modified': last_modified}
if request.httprequest.headers.get('If-Modified-Since') == last_modified:
return werkzeug.wrappers.Response(status=304, headers=headers)
batches = pr.env.ref('runbot_merge.dashboard-prep')._run_action_code_multi({
'pr': pr,
'repos': repos,
'branches': branches,
'genealogy': genealogy,
})
etag = hashlib.sha256(f"(P){pr.id},{pr.repository.id},{pr.target.id},{pr.batch_id.blocked}".encode())
# repos and branches should be in a consistent order so can just hash that
etag.update(''.join(f'(R){r.name}' for r in repos).encode())
etag.update(''.join(f'(T){b.name},{b.active}' for b in branches).encode())
# and product of deterministic iterations should be deterministic
for r, b in product(repos, branches):
ps = batches[r, b]
etag.update(f"(B){ps['state']},{ps['detached']},{ps['active']}".encode())
etag.update(''.join(
f"(PS){p['label']},{p['closed']},{p['number']},{p['checked']},{p['reviewed']},{p['attached']},{p['pr'].staging_id.id}"
for p in ps['prs']
).encode())
etag = headers['ETag'] = base64.b32encode(etag.digest()).decode()
if if_none_match == etag:
return werkzeug.wrappers.Response(status=304, headers=headers)
if not pr.batch_id.target:
im = render_inconsistent_batch(pr.batch_id)
else:
im = render_full_table(pr, branches, repos, batches)
buffer = io.BytesIO()
im.save(buffer, 'png', optimize=True)
return werkzeug.wrappers.Response(buffer.getvalue(), headers=headers)
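A hedged client-side sketch of the revalidation flow this implements; the host and PR path are made up:
import requests

url = 'https://mergebot.example.com/odoo/odoo/pull/12345.png'
first = requests.get(url)
headers = {}
if etag := first.headers.get('ETag'):
    headers['If-None-Match'] = etag
if modified := first.headers.get('Last-Modified'):
    headers['If-Modified-Since'] = modified
# while the PR's batch is unchanged, the replay should come back 304
again = requests.get(url, headers=headers)
assert again.status_code in (200, 304)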
class Decoration(Flag):
STRIKETHROUGH = auto()
@dataclass(frozen=True)
class Text:
content: str
font: ImageFont.FreeTypeFont
color: Color
decoration: Decoration = Decoration(0)
@cached_property
def width(self) -> int:
return ceil(self.font.getlength(self.content))
@property
def height(self) -> int:
return sum(self.font.getmetrics())
def draw(self, image: ImageDraw.ImageDraw, left: int, top: int):
image.text((left, top), self.content, fill=self.color, font=self.font)
if Decoration.STRIKETHROUGH in self.decoration:
x1, _, x2, _ = self.font.getbbox(self.content)
_, y1, _, y2 = self.font.getbbox('x')
# put the strikethrough line about 1/3rd down the x (default seems
# to be a bit above halfway down but that's ugly with numbers which
# is most of our stuff)
y = top + y1 + (y2 - y1) / 3
image.line([(left + x1, y), (left + x2, y)], self.color)
@dataclass(frozen=True)
class Checkbox:
checked: Optional[bool]
font: ImageFont.FreeTypeFont
color: Color
success: Color
error: Color
@cached_property
def width(self) -> int:
return ceil(max(
self.font.getlength(BOX_EMPTY),
self.font.getlength(CHECK_MARK),
self.font.getlength(CROSS),
))
@property
def height(self):
return sum(self.font.getmetrics())
def draw(self, image: ImageDraw.ImageDraw, left: int, top: int):
image.text((left, top+5), BOX_EMPTY, fill=self.color, font=self.font)
if self.checked is True:
image.text((left, top+4), CHECK_MARK, fill=self.success, font=self.font)
elif self.checked is False:
image.text((left, top+4), CROSS, fill=self.error, font=self.font)
@dataclass(frozen=True)
class Line:
spans: List[Text | Checkbox | Lines]
@property
def width(self) -> int:
return sum(s.width for s in self.spans)
@property
def height(self) -> int:
return max(s.height for s in self.spans) if self.spans else 0
def draw(self, image: ImageDraw.ImageDraw, left: int, top: int):
for span in self.spans:
span.draw(image, left, top)
left += span.width
@dataclass(frozen=True)
class Lines:
lines: List[Line]
@property
def width(self) -> int:
return max(l.width for l in self.lines)
@property
def height(self) -> int:
return sum(l.height for l in self.lines)
def draw(self, image: ImageDraw.ImageDraw, left: int, top: int):
for line in self.lines:
line.draw(image, left, top)
top += line.height
@dataclass(frozen=True)
class Cell:
content: Lines | Line | Text
background: Color = (255, 255, 255)
attached: bool = True
@cached_property
def width(self) -> int:
return self.content.width + 2 * HORIZONTAL_PADDING
@cached_property
def height(self) -> int:
return self.content.height + 2 * VERTICAL_PADDING
def render_full_table(pr, branches, repos, batches):
with file_open('web/static/fonts/google/Open_Sans/Open_Sans-Regular.ttf', 'rb') as f:
font = ImageFont.truetype(f, size=16, layout_engine=0)
f.seek(0)
supfont = ImageFont.truetype(f, size=13, layout_engine=0)
with file_open('web/static/fonts/google/Open_Sans/Open_Sans-Bold.ttf', 'rb') as f:
bold = ImageFont.truetype(f, size=16, layout_engine=0)
with file_open('web/static/src/libs/fontawesome/fonts/fontawesome-webfont.ttf', 'rb') as f:
icons = ImageFont.truetype(f, size=16, layout_engine=0)
rowheights = collections.defaultdict(int)
colwidths = collections.defaultdict(int)
cells = {}
for b in chain([None], branches):
for r in chain([None], repos):
opacity = 1.0 if b is None or b.active else 0.5
current_row = b == pr.target
background = BG['info'] if current_row or r == pr.repository else BG[None]
if b is None: # first row
cell = Cell(Text("" if r is None else r.name, bold, TEXT), background)
elif r is None: # first column
cell = Cell(Text(b.name, font, blend(TEXT, opacity, over=background)), background)
elif current_row:
ps = batches[r, b]
bgcolor = lighten(BG[ps['state']], by=-0.05) if pr in ps['pr_ids'] else BG[ps['state']]
background = blend(bgcolor, opacity, over=background)
foreground = blend((39, 110, 114), opacity, over=background)
success = blend(SUCCESS, opacity, over=background)
error = blend(ERROR, opacity, over=background)
boxes = {
False: Checkbox(False, icons, foreground, success, error),
True: Checkbox(True, icons, foreground, success, error),
None: Checkbox(None, icons, foreground, success, error),
}
prs = []
attached = True
for p in ps['prs']:
pr = p['pr']
attached = attached and p['attached']
if pr.staging_id:
sub = ": is staged"
elif pr.error:
sub = ": staging failed"
else:
sub = ""
lines = [
Line([Text(
f"#{p['number']}{sub}",
font,
foreground,
decoration=Decoration.STRIKETHROUGH if p['closed'] else Decoration(0),
)]),
]
# no need for details if closed or in error
if pr.state not in ('merged', 'closed', 'error') and not pr.staging_id:
if pr.draft:
lines.append(Line([boxes[False], Text("is in draft", font, error)]))
lines.extend([
Line([
boxes[bool(pr.squash or pr.merge_method)],
Text(
"merge method: {}".format('single' if pr.squash else (pr.merge_method or 'missing')),
font,
foreground if pr.squash or pr.merge_method else error,
),
]),
Line([
boxes[bool(pr.reviewed_by)],
Text(
"Reviewed" if pr.reviewed_by else "Not Reviewed",
font,
foreground if pr.reviewed_by else error,
)
]),
Line([
boxes[pr.batch_id.skipchecks or pr.status == 'success'],
Text("CI", font, foreground if pr.batch_id.skipchecks or pr.status == 'success' else error),
]),
])
if not pr.batch_id.skipchecks:
statuses = json.loads(pr.statuses_full)
for ci in pr.repository.status_ids._for_pr(pr):
st = (statuses.get(ci.context.strip()) or {'state': 'pending'})['state']
color = foreground
if st in ('error', 'failure'):
color = error
box = boxes[False]
elif st == 'success':
box = boxes[True]
else:
box = boxes[None]
lines.append(Line([
Text(" - ", font, color),
box,
Text(f"{ci.repo_id.name}: {ci.context}", font, color)
]))
prs.append(Lines(lines))
cell = Cell(Line(prs), background, attached)
else:
ps = batches[r, b]
bgcolor = lighten(BG[ps['state']], by=-0.05) if pr in ps['pr_ids'] else BG[ps['state']]
background = blend(bgcolor, opacity, over=background)
foreground = blend((39, 110, 114), opacity, over=background)
line = []
attached = True
for p in ps['prs']:
line.append(Text(
f"#{p['number']}",
font,
foreground,
decoration=Decoration.STRIKETHROUGH if p['closed'] else Decoration(0),
))
attached = attached and p['attached']
for attribute in filter(None, [
'error' if p['pr'].error else '',
'' if p['checked'] else 'missing statuses',
'' if p['reviewed'] else 'missing r+',
'' if p['attached'] else 'detached',
'staged' if p['pr'].staging_id else 'ready' if p['pr']._ready else ''
]):
color = SUCCESS if attribute in ('staged', 'ready') else ERROR
line.append(Text(f' {attribute}', supfont, blend(color, opacity, over=background)))
line.append(Text(" ", font, foreground))
cell = Cell(Line(line), background, attached)
cells[r, b] = cell
rowheights[b] = max(rowheights[b], cell.height)
colwidths[r] = max(colwidths[r], cell.width)
im = Image.new("RGB", (sum(colwidths.values()), sum(rowheights.values())), "white")
# no need to set the font here because every text element has its own
draw = ImageDraw.Draw(im, 'RGB')
top = 0
for b in chain([None], branches):
left = 0
for r in chain([None], repos):
cell = cells[r, b]
# for a given cell, we first print the background, then the text, then
# the borders
# need to subtract 1 because pillow uses inclusive rect coordinates
right = left + colwidths[r] - 1
bottom = top + rowheights[b] - 1
draw.rectangle(
(left, top, right, bottom),
cell.background,
)
# draw content adding padding
cell.content.draw(draw, left=left + HORIZONTAL_PADDING, top=top + VERTICAL_PADDING)
# draw bottom-right border
draw.line([
(left, bottom),
(right, bottom),
(right, top),
], fill=(172, 176, 170))
if not cell.attached:
# overdraw previous cell's bottom border
draw.line([(left, top-1), (right-1, top-1)], fill=ERROR)
left += colwidths[r]
top += rowheights[b]
return im
def render_inconsistent_batch(batch):
"""If a batch has inconsistent targets, just point out the inconsistency by
listing the PR and targets
"""
with file_open('web/static/fonts/google/Open_Sans/Open_Sans-Regular.ttf', 'rb') as f:
font = ImageFont.truetype(f, size=16, layout_engine=0)
im = Image.new("RGB", (4000, 4000), color=BG['danger'])
w = h = 0
def draw(label, draw=ImageDraw.Draw(im)):
nonlocal w, h
draw.text((0, h), label, fill=blend(ERROR, 1.0, over=BG['danger']), font=font)
_, _, ww, hh = font.getbbox(label)
w = max(w, ww)
h += hh
draw(" Inconsistent targets:")
for p in batch.prs:
draw(f"{p.display_name} has target '{p.target.name}'")
draw(" To resolve, either retarget or close the mis-targeted pull request(s).")
return im.crop((0, 0, w+10, h+5))
Color = Tuple[int, int, int]
TEXT: Color = (102, 102, 102)
ERROR: Color = (220, 53, 69)
SUCCESS: Color = (40, 167, 69)
BG: Mapping[str | None, Color] = collections.defaultdict(lambda: (255, 255, 255), {
'info': (217, 237, 247),
'success': (223, 240, 216),
'warning': (252, 248, 227),
'danger': (242, 222, 222),
})
CHECK_MARK = "\uf00c"
CROSS = "\uf00d"
BOX_EMPTY = "\uf096"
def blend_single(c: int, over: int, opacity: float) -> int:
return round(over * (1 - opacity) + c * opacity)
def blend(color: Color, opacity: float, *, over: Color = (255, 255, 255)) -> Color:
assert 0.0 <= opacity <= 1.0
return (
blend_single(color[0], over[0], opacity),
blend_single(color[1], over[1], opacity),
blend_single(color[2], over[2], opacity),
)
def lighten(color: Color, *, by: float) -> Color:
# colorsys uses values in the range [0, 1] rather than pillow/CSS-style [0, 255]
r, g, b = tuple(c / 255 for c in color)
hue, lightness, saturation = colorsys.rgb_to_hls(r, g, b)
# by% of the way between value and 1.0
if by >= 0: lightness += (1.0 - lightness) * by
# -by% of the way between 0 and value
else: lightness *= (1.0 + by)
return cast(Color, tuple(
round(c * 255)
for c in colorsys.hls_to_rgb(hue, lightness, saturation)
))
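Quick sanity examples for the color helpers above (results assume CPython's round-half-to-even):
assert blend((0, 0, 0), 0.5) == (128, 128, 128)             # 50% black over white
assert lighten((100, 100, 100), by=1.0) == (255, 255, 255)  # lightness pushed to 1.0
assert lighten((100, 100, 100), by=-1.0) == (0, 0, 0)       # and scaled down to 0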

View File

@ -1,17 +1,18 @@
# -*- coding: utf-8 -*-
import logging
from odoo import Command
from odoo.http import Controller, request, route
try:
from odoo.addons.saas_worker.util import from_role
except ImportError:
def from_role(_):
def from_role(*_, **__):
return lambda _: None
_logger = logging.getLogger(__name__)
class MergebotReviewerProvisioning(Controller):
@from_role('accounts')
@from_role('accounts', signed=True)
@route('/runbot_merge/users', type='json', auth='public')
def list_users(self):
env = request.env(su=True)
@ -23,7 +24,7 @@ class MergebotReviewerProvisioning(Controller):
if u.github_login
]
@from_role('accounts')
@from_role('accounts', signed=True)
@route('/runbot_merge/provision', type='json', auth='public')
def provision_user(self, users):
_logger.info('Provisioning %s users: %s.', len(users), ', '.join(map(
@ -34,7 +35,12 @@ class MergebotReviewerProvisioning(Controller):
Partners = env['res.partner']
Users = env['res.users']
existing_partners = Partners.search([
existing_logins = set()
existing_oauth = set()
for u in Users.with_context(active_test=False).search([]):
existing_logins.add(u.login)
existing_oauth.add((u.oauth_provider_id.id, u.oauth_uid))
existing_partners = Partners.with_context(active_test=False).search([
'|', ('email', 'in', [u['email'] for u in users]),
('github_login', 'in', [u['github_login'] for u in users])
])
@ -55,29 +61,54 @@ class MergebotReviewerProvisioning(Controller):
if p.github_login:
# assume there can't be an existing one because github_login is
# unique, and should not be able to collide with emails
partners[p.github_login] = p
partners[p.github_login.casefold()] = p
portal = env.ref('base.group_portal')
internal = env.ref('base.group_user')
odoo_provider = env.ref('auth_oauth.provider_openerp')
to_create = []
created = updated = 0
updated = 0
to_activate = Partners
for new in users:
if 'sub' in new:
new['oauth_provider_id'] = odoo_provider.id
new['oauth_uid'] = new.pop('sub')
# prioritise by github_login as that's the unique-est point of information
current = partners.get(new['github_login']) or partners.get(new['email']) or Partners
current = partners.get(new['github_login'].casefold()) or partners.get(new['email']) or Partners
if not current.active:
to_activate |= current
# entry doesn't have user -> create user
if not current.user_ids:
# skip users without an email (= login) as that
# fails
if not new['email']:
_logger.info(
"Unable to create user for %s: no email in provisioning data",
current.display_name
)
continue
if 'oauth_uid' in new:
if (new['oauth_provider_id'], new['oauth_uid']) in existing_oauth:
_logger.warning(
"Attempted to create user with duplicate oauth uid "
"%s with provider %r for provisioning entry %r. "
"There is likely a duplicate partner (one version "
"with email, one with github login)",
new['oauth_uid'], odoo_provider.display_name, new,
)
continue
if new['email'] in existing_logins:
_logger.warning(
"Attempted to create user with duplicate login %s for "
"provisioning entry %r. There is likely a duplicate "
"partner (one version with email, one with github "
"login)",
new['email'], new,
)
continue
new['login'] = new['email']
new['groups_id'] = [(4, internal.id)]
new['groups_id'] = [Command.link(internal.id)]
# entry has partner -> create user linked to existing partner
# (and update partner implicitly)
if current:
@ -88,26 +119,36 @@ class MergebotReviewerProvisioning(Controller):
# otherwise update user (if there is anything to update)
user = current.user_ids
if len(user) != 1:
_logger.warning("Got %d users for partner %s.", len(user), current.display_name)
_logger.warning("Got %d users for partner %s, updating first.", len(user), current.display_name)
user = user[:1]
new.setdefault("active", True)
update_vals = {
k: v
for k, v in new.items()
if v not in ('login', 'email')
if v != (user[k] if k != 'oauth_provider_id' else user[k].id)
}
if user.has_group('base.group_portal'):
update_vals['groups_id'] = [
Command.unlink(portal.id),
Command.link(internal.id),
]
if update_vals:
user.write(update_vals)
updated += 1
created = len(to_create)
if to_create:
# only create 100 users at a time to avoid request timeout
Users.create(to_create[:100])
created = len(to_create[:100])
Users.create(to_create)
if to_activate:
to_activate.active = True
_logger.info("Provisioning: created %d updated %d.", created, updated)
return [created, updated]
@from_role('accounts')
@from_role('accounts', signed=True)
@route(['/runbot_merge/get_reviewers'], type='json', auth='public')
def fetch_reviewers(self, **kwargs):
reviewers = request.env['res.partner.review'].sudo().search([
@ -115,17 +156,18 @@ class MergebotReviewerProvisioning(Controller):
]).mapped('partner_id.github_login')
return reviewers
@from_role('accounts')
@from_role('accounts', signed=True)
@route(['/runbot_merge/remove_reviewers'], type='json', auth='public', methods=['POST'])
def update_reviewers(self, github_logins, **kwargs):
partners = request.env['res.partner'].sudo().search([('github_login', 'in', github_logins)])
partners.write({
'review_rights': [(5, 0, 0)],
'delegate_reviewer': [(5, 0, 0)],
'email': False,
'review_rights': [Command.clear()],
'delegate_reviewer': [Command.clear()],
})
# Assign the linked users as portal users
partners.mapped('user_ids').write({
'groups_id': [(6, 0, [request.env.ref('base.group_portal').id])]
'groups_id': [Command.set([request.env.ref('base.group_portal').id])]
})
return True

View File

@ -4,30 +4,33 @@
<field name="model_id" ref="model_runbot_merge_project"/>
<field name="state">code</field>
<field name="code">model._check_stagings(True)</field>
<field name="interval_number">1</field>
<field name="interval_type">minutes</field>
<field name="interval_number">6</field>
<field name="interval_type">hours</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">30</field>
</record>
<record model="ir.cron" id="staging_cron">
<field name="name">Check for progress of PRs and create Stagings</field>
<field name="model_id" ref="model_runbot_merge_project"/>
<field name="state">code</field>
<field name="code">model._create_stagings(True)</field>
<field name="interval_number">1</field>
<field name="interval_type">minutes</field>
<field name="interval_number">6</field>
<field name="interval_type">hours</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">40</field>
</record>
<record model="ir.cron" id="feedback_cron">
<field name="name">Send feedback to PR</field>
<field name="model_id" ref="model_runbot_merge_pull_requests_feedback"/>
<field name="state">code</field>
<field name="code">model._send()</field>
<field name="interval_number">1</field>
<field name="interval_type">minutes</field>
<field name="interval_number">6</field>
<field name="interval_type">hours</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">60</field>
</record>
<record model="ir.cron" id="labels_cron">
<field name="name">Update labels on PR</field>
@ -38,16 +41,18 @@
<field name="interval_type">minutes</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">70</field>
</record>
<record model="ir.cron" id="fetch_prs_cron">
<field name="name">Check for PRs to fetch</field>
<field name="model_id" ref="model_runbot_merge_fetch_job"/>
<field name="state">code</field>
<field name="code">model._check(True)</field>
<field name="interval_number">1</field>
<field name="interval_type">minutes</field>
<field name="interval_number">6</field>
<field name="interval_type">hours</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">10</field>
</record>
<record model="ir.cron" id="check_linked_prs_status">
<field name="name">Warn on linked PRs where only one is ready</field>
@ -58,15 +63,17 @@
<field name="interval_type">hours</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">50</field>
</record>
<record model="ir.cron" id="process_updated_commits">
<field name="name">Impact commit statuses on PRs and stagings</field>
<field name="model_id" ref="model_runbot_merge_commit"/>
<field name="state">code</field>
<field name="code">model._notify()</field>
<field name="interval_number">1</field>
<field name="interval_type">minutes</field>
<field name="interval_number">6</field>
<field name="interval_type">hours</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">20</field>
</record>
</odoo>
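The intervals above jump from one minute to six hours because these crons are now triggered on demand rather than polled: code that produces work wakes the relevant cron explicitly, and the long interval is only a safety net. As seen in `handle_status` earlier:
# after upserting new statuses, wake the processing cron immediately
env.ref("runbot_merge.process_updated_commits")._trigger()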

View File

@ -0,0 +1,174 @@
id,template,help
runbot_merge.handle.branch.unmanaged,"This PR targets the un-managed branch {repository}:{branch}, it needs to be retargeted before it can be merged.","Notifies of event on PR whose branch is not managed by the mergebot.
repository: repository name
branch: branch (ref) name
event: complete pr event"
runbot_merge.handle.branch.inactive,"This PR targets the disabled branch {repository}:{branch}, it needs to be retargeted before it can be merged.","Notifies of event on PR whose branch is deactivated.
repository: repository name
branch: branch (ref) name
event: complete pr event"
runbot_merge.handle.pr.merged,@{event[sender][login]} ya silly goose you can't reopen a merged PR.,"Notifies that a user tried to reopen a merged PR.
Event: complete PR event"
runbot_merge.pr.load.unmanaged,"Branch `{pr[base][ref]}` is not within my remit, imma just ignore it.","Notifies that a user tried to load a PR targeting a non-handled branch.
pr: pull request (github object)
Repository: repository object (???)"
runbot_merge.pr.load.fetched,"{pr.ping}I didn't know about this PR and had to retrieve its information, you may have to re-approve it as I didn't see previous commands.","Notifies that we did retrieve an unknown PR (either by request or as side effect of an interaction).
Pr: pr object we just created"
runbot_merge.pr.branch.disabled,"{pr.ping}the target branch {pr.target.name!r} has been disabled, you may want to close this PR.","Notifies that the target branch for this PR was deactivated.
pr: pull request in question"
runbot_merge.pr.merge.failed,{pr.ping}unable to stage: {reason},"Notifies that the PR could not be merged into the staging branch.
pr: pr object we tried to merge
reason: error message
exc: exception object"
runbot_merge.pr.fetch.unmanaged,I'm sorry. Branch `{branch}` is not within my remit.,"Responds to a request to fetch a PR to an unmanaged branch.
repository: pr repository
branch: target branch
number: pr number"
runbot_merge.command.access.no,"I'm sorry, @{user}. I'm afraid I can't do that.","Responds to command by a user who has no rights at all.
user: github login of comment sender
pr: pr object to which the command was sent"
runbot_merge.command.approve.failure,@{user} you may want to rebuild or fix this PR as it has failed CI.,"Responds to r+ of PR with failed CI.
user: github login of comment sender
pr: pr object to which the command was sent"
runbot_merge.command.unapprove.p0,"Skipchecks removed due to r-.","Responds to r- of pr in skipchecks.
user: github login of comment sender
pr: pr object to which the command was sent"
runbot_merge.command.method,Merge method set to {new_method}.,"Responds to the setting of the merge method.
new_method: ...
pr: pr object to which the command was sent
user: github login of the comment sender"
runbot_merge.failure.approved,{pr.ping}{status!r} failed on this reviewed PR.,"Notification of failed status on a reviewed PR.
pr: pull request in question
status: failed status"
runbot_merge.pr.created,[![Pull request status dashboard]({pr.url}.png)]({pr.url}),"Initial comment on PR creation.
pr: created pr"
runbot_merge.pr.linked.not_ready,{pr.ping}linked pull request(s) {siblings} not ready. Linked PRs are not staged until all of them are ready.,"Comment when a PR is ready (approved & validated) but it is linked to other PRs which are not.
pr: pr we're looking at
siblings: its siblings, as a single comma-separated list of PR links"
runbot_merge.pr.merge_method,"{pr.ping}because this PR has multiple commits, I need to know how to merge it:
{methods}","Comment when a PR is ready but doesn't have a merge method set
pr: the pr we can't stage
methods: a markdown-formatted list of valid merge methods"
runbot_merge.pr.staging.mismatch,"{pr.ping}we apparently missed updates to this PR and tried to stage it in a state which might not have been approved.
The properties {mismatch} were not correctly synchronized and have been updated.
<details><summary>differences</summary>
```diff
{diff}```
</details>
Note that we are unable to check the properties {unchecked}.
Please check and re-approve.
","Comment when staging was attempted but a sanity check revealed the github state and the mergebot state differ.
pr: the pr we tried to stage
mismatch: comma separated list of mismatched property names
diff: patch-style view of the differing properties
unchecked: comma-separated list of properties which can't be checked"
runbot_merge.pr.staging.fail,{pr.ping}staging failed: {message},"Comment when a PR caused a staging to fail (normally only sent if the staging has a single batch, may be sent on multiple PRs depending whether the heuristic to guess the problematic PR of a batch succeeded)
pr: the pr
message: staging failure information (error message, build link, etc...)"
runbot_merge.forwardport.updates.closed,"{pr.ping}ancestor PR {parent.display_name} has been updated but this PR is {pr.state} and can't be updated to match.
You may want or need to manually update any followup PR.","Comment when a PR is updated and one of its followups is already merged or closed. Sent to the followup.
pr: the closed or merged PR
parent: the modified ancestor PR"
runbot_merge.forwardport.updates.conflict.parent,"{pr.ping}WARNING: the latest change ({pr.head}) triggered a conflict when updating the next forward-port ({next.display_name}), and has been ignored.
You will need to update this pull request differently, or fix the issue by hand on {next.display_name}.","Comment when a PR update triggers a conflict in a child.
pr: updated parent PR
next: child PR in conflict"
runbot_merge.forwardport.updates.conflict.child,"{pr.ping}WARNING: the update of {previous.display_name} to {previous.head} has caused a conflict in this pull request, data may have been lost.{stdout}{stderr}","Comment when a PR update followup is in conflict.
pr: PR where update followup conflict happened
previous: parent PR which triggered the followup
stdout: markdown-formatted stdout of git, if any
stderr: markdown-formatted stderr of git, if any"
runbot_merge.forwardport.update.detached,{pr.ping}this PR was modified / updated and has become a normal PR. It must be merged directly.,"Comment when a forwardport PR gets updated, documents that the PR now needs to be merged the “normal” way.
pr: the pr in question "
runbot_merge.forwardport.update.parent,{pr.ping}child PR {child.display_name} was modified / updated and has become a normal PR. This PR (and any of its parents) will need to be merged independently as approvals won't cross.,"Sent to an open PR when its direct child has been detached.
pr: the pr
child: its detached child"
runbot_merge.forwardport.ci.failed,{pr.ping}{ci} failed on this forward-port PR,"Comment when CI fails on a forward-port PR (which thus won't port any further, for now).
pr: the pr in question
ci: the failed status"
runbot_merge.forwardport.failure.discrepancy,{pr.ping}this pull request can not be forward-ported: next branch is {next!r} but linked pull request {linked.display_name} has a next branch {other!r}.,"Comment when we tried to forward port a PR batch, but the PRs have different next targets (unlikely to happen really).
pr: the pr we tried to forward port
linked: the linked PR with a different next target
next: next target for the current pr
other: next target for the other pr"
runbot_merge.forwardport.failure.conflict,"{pr.ping}the next pull request ({new.display_name}) is in conflict. You can merge the chain up to here by saying
> @{pr.repository.project_id.github_prefix} r+
{footer}","Comment when a forward port was created but is in conflict, warns of that & gives instructions for current PR.
pr: the pr which was just forward ported
new: the new forward-port
footer: some footer text"
runbot_merge.forwardport.reminder,{pr.ping}this forward port of {source.display_name} is awaiting action (not merged or closed).,"Comment when a forward port has outstanding (not merged or closed) descendants
pr: the forward-port
source: the source PR"
runbot_merge.forwardport.failure,"{pr.ping}cherrypicking of pull request {pr.source_id.display_name} failed.
{commits}{stdout}{stderr}
Either perform the forward-port manually (and push to this branch, proceeding as usual) or close this PR (maybe?).
In the former case, you may want to edit this PR message as well.
:warning: after resolving this conflict, you will need to merge it via @{pr.repository.project_id.github_prefix}.
{footer}","Comment when a forward-port failed.
pr: the new pr (in failure)
commits: markdown-formatted list of source commits, indicating which failed
stdout: git's stdout
stderr: git's stderr
footer: some footer text"
runbot_merge.forwardport.linked,"{pr.ping}while this was properly forward-ported, at least one co-dependent PR ({siblings}) did not succeed. You will need to fix it before this can be merged.
Both this PR and the others will need to be approved via `@{pr.repository.project_id.github_prefix} r+` as they are all considered “in conflict”.
{footer} ","Comment when a forward port succeeded but at least one sibling failed.
pr: the current pr (new)
siblings: comma-separated list of sibling links
footer: some footer text"
runbot_merge.forwardport.final,"{pr.ping}this PR targets {pr.target.name} and is the last of the forward-port chain{containing}
{ancestors}
To merge the full chain, use
> @{pr.repository.project_id.github_prefix} r+
{footer}","Comment when a forward port was created and is the last of a sequence (target the limit branch).
pr: the new forward port
containing: label changing depending whether there are ancestors to merge
ancestors: markdown formatted list of parent PRs which can be approved as part of the chain
footer: a footer"
runbot_merge.forwardport.intermediate,"This PR targets {pr.target.name} and is part of the forward-port chain. Further PRs will be created up to {pr.limit_pretty}.
{footer}","Comment when a forward port was succcessfully created but is not the last of the line.
pr: the new forward port
footer: a footer"

runbot_merge/git.py Normal file
View File

@ -0,0 +1,272 @@
import dataclasses
import itertools
import logging
import os
import pathlib
import resource
import stat
import subprocess
from typing import Optional, TypeVar, Union, Sequence, Tuple, Dict
from odoo.tools.appdirs import user_cache_dir
from .github import MergeError, PrCommit
_logger = logging.getLogger(__name__)
def source_url(repository) -> str:
return 'https://{}@github.com/{}'.format(
repository.project_id.github_token,
repository.name,
)
def fw_url(repository) -> str:
return 'https://{}@github.com/{}'.format(
repository.project_id.fp_github_token,
repository.fp_remote_target,
)
Authorship = Union[Tuple[str, str], Tuple[str, str, str]]
def get_local(repository, *, clone: bool = True) -> 'Optional[Repo]':
repos_dir = pathlib.Path(user_cache_dir('mergebot'))
repos_dir.mkdir(parents=True, exist_ok=True)
# NB: `repository.name` is `$org/$name` so this will be a subdirectory, probably
repo_dir = repos_dir / repository.name
if repo_dir.is_dir():
return git(repo_dir)
elif clone:
_logger.info("Cloning out %s to %s", repository.name, repo_dir)
subprocess.run(['git', 'clone', '--bare', source_url(repository), str(repo_dir)], check=True)
# bare repos don't have fetch specs by default, and fetching *into*
# them is a pain in the ass, configure fetch specs so `git fetch`
# works properly
repo = git(repo_dir)
repo.config('--add', 'remote.origin.fetch', '+refs/heads/*:refs/heads/*')
# negative refspecs require git 2.29
repo.config('--add', 'remote.origin.fetch', '^refs/heads/tmp.*')
repo.config('--add', 'remote.origin.fetch', '^refs/heads/staging.*')
return repo
else:
_logger.warning(
"Unable to acquire %s: %s",
repo_dir,
"doesn't exist" if not repo_dir.exists()\
else oct(stat.S_IFMT(repo_dir.stat().st_mode))
)
return None
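# `-c` settings prepended to every git invocation: keep git from starting
# automatic gc/maintenance in the middle of mergebot operations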
ALWAYS = ('gc.auto=0', 'maintenance.auto=0')
def _bypass_limits():
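# used as preexec_fn: lift the address-space limit inherited from the
# odoo worker so memory-hungry git operations are not starved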
resource.setrlimit(resource.RLIMIT_AS, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
def git(directory: str) -> 'Repo':
return Repo(directory, check=True)
Self = TypeVar("Self", bound="Repo")
class Repo:
def __init__(self, directory, **config) -> None:
self._directory = str(directory)
config.setdefault('stderr', subprocess.PIPE)
self._config = config
self._params = ()
def __getattr__(self, name: str) -> 'GitCommand':
return GitCommand(self, name.replace('_', '-'))
def _run(self, *args, **kwargs) -> subprocess.CompletedProcess:
opts = {**self._config, **kwargs}
args = ('git', '-C', self._directory)\
+ tuple(itertools.chain.from_iterable(('-c', p) for p in self._params + ALWAYS))\
+ args
try:
return subprocess.run(args, preexec_fn=_bypass_limits, **opts)
except subprocess.CalledProcessError as e:
stream = e.stderr or e.stdout
if stream:
_logger.error("git call error: %s", stream)
raise
def stdout(self, flag: bool = True) -> Self:
if flag is True:
return self.with_config(stdout=subprocess.PIPE)
elif flag is False:
return self.with_config(stdout=None)
return self.with_config(stdout=flag)
def check(self, flag: bool) -> Self:
return self.with_config(check=flag)
def with_config(self, **kw) -> Self:
opts = {**self._config, **kw}
r = Repo(self._directory, **opts)
r._params = self._params
return r
def with_params(self, *args) -> Self:
r = self.with_config()
r._params = args
return r
def clone(self, to: str, branch: Optional[str] = None) -> Self:
self._run(
'clone',
*([] if branch is None else ['-b', branch]),
self._directory, to,
)
return Repo(to)
def get_tree(self, commit_hash: str) -> str:
r = self.with_config(check=True).rev_parse(f'{commit_hash}^{{tree}}')
return r.stdout.strip()
def rebase(self, dest: str, commits: Sequence[PrCommit]) -> Tuple[str, Dict[str, str]]:
"""Implements rebase by hand atop plumbing so:
- we can work without a working copy
- we can track individual commits (and store the mapping)
It looks like `--merge-base` is not sufficient for `merge-tree` to
correctly keep track of history, so it loses contents. Therefore
implement in two passes as in the github version.
"""
repo = self.stdout().with_config(text=True, check=False)
logger = _logger.getChild('rebase')
if not commits:
raise MergeError("PR has no commits")
prev_tree = repo.get_tree(dest)
prev_original_tree = repo.get_tree(commits[0]['parents'][0]["sha"])
new_trees = []
parent = dest
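# first pass: merge each commit's tree onto the accumulated result, then
# create a throwaway merge commit so the next merge-tree call sees the
# history it needs (per the docstring, --merge-base alone loses contents)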
for original in commits:
if len(original['parents']) != 1:
raise MergeError(
f"commits with multiple parents ({original['sha']}) can not be rebased, "
"either fix the branch to remove merges or merge without "
"rebasing")
new_trees.append(check(repo.merge_tree(parent, original['sha'])).stdout.strip())
# allow merging empty commits, but not empty*ing* commits while merging
if prev_original_tree != original['commit']['tree']['sha']:
if new_trees[-1] == prev_tree:
raise MergeError(
f"commit {original['sha']} results in an empty tree when "
f"merged, it is likely a duplicate of a merged commit, "
f"rebase and remove."
)
parent = check(repo.commit_tree(
tree=new_trees[-1],
parents=[parent, original['sha']],
message=f'temp rebase {original["sha"]}',
)).stdout.strip()
prev_tree = new_trees[-1]
prev_original_tree = original['commit']['tree']['sha']
mapping = {}
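# second pass: replay each merged tree as a plain commit on top of `dest`,
# preserving the original message and authorship, and map old -> new shas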
for original, tree in zip(commits, new_trees):
authorship = check(repo.show('--no-patch', '--pretty=%an%n%ae%n%ai%n%cn%n%ce', original['sha']))
author_name, author_email, author_date, committer_name, committer_email =\
authorship.stdout.splitlines()
c = check(repo.commit_tree(
tree=tree,
parents=[dest],
message=original['commit']['message'],
author=(author_name, author_email, author_date),
committer=(committer_name, committer_email),
)).stdout.strip()
logger.debug('copied %s to %s (parent: %s)', original['sha'], c, dest)
dest = mapping[original['sha']] = c
return dest, mapping
def merge(self, c1: str, c2: str, msg: str, *, author: Tuple[str, str]) -> str:
repo = self.stdout().with_config(text=True, check=False)
t = repo.merge_tree(c1, c2)
if t.returncode:
raise MergeError(t.stderr)
c = self.commit_tree(
tree=t.stdout.strip(),
message=msg,
parents=[c1, c2],
author=author,
)
if c.returncode:
raise MergeError(c.stderr)
return c.stdout.strip()
def commit_tree(
self, *, tree: str, message: str,
parents: Sequence[str] = (),
author: Optional[Authorship] = None,
committer: Optional[Authorship] = None,
) -> subprocess.CompletedProcess:
authorship = {}
if author:
authorship['GIT_AUTHOR_NAME'] = author[0]
authorship['GIT_AUTHOR_EMAIL'] = author[1]
if len(author) > 2:
authorship['GIT_AUTHOR_DATE'] = author[2]
if committer:
authorship['GIT_COMMITTER_NAME'] = committer[0]
authorship['GIT_COMMITTER_EMAIL'] = committer[1]
if len(committer) > 2:
authorship['GIT_COMMITTER_DATE'] = committer[2]
return self.with_config(
input=message,
stdout=subprocess.PIPE,
text=True,
env={
**os.environ,
**authorship,
# we don't want git to use the timezone of the machine it's
# running on: previously it used the timezone configured in
# github (?), which I think / assume defaults to a generic UTC
'TZ': 'UTC',
}
)._run(
'commit-tree',
tree,
'-F', '-',
*itertools.chain.from_iterable(('-p', p) for p in parents),
)
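A hypothetical call to the helper above, building a commit object directly from a tree (shas and identity are made up; the tree sha is git's well-known empty-tree OID):
c = repo.commit_tree(
    tree='4b825dc642cb6eb9a060e54bf8d69288fbee4904',
    message='rebuilt commit',
    parents=['deadbeefdeadbeefdeadbeefdeadbeefdeadbeef'],
    author=('Jane Doe', 'jane@example.com', '2021-01-01T00:00:00+00:00'),
).stdout.strip()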
def check(p: subprocess.CompletedProcess) -> subprocess.CompletedProcess:
if not p.returncode:
return p
_logger.info("rebase failed at %s\nstdout:\n%s\nstderr:\n%s", p.args, p.stdout, p.stderr)
raise MergeError(p.stderr or 'merge conflict')
@dataclasses.dataclass
class GitCommand:
repo: Repo
name: str
def __call__(self, *args, **kwargs) -> subprocess.CompletedProcess:
return self.repo._run(self.name, *args, *self._to_options(kwargs))
def _to_options(self, d):
for k, v in d.items():
if len(k) == 1:
yield '-' + k
else:
yield '--' + k.replace('_', '-')
if v not in (None, True):
assert v is not False
yield str(v)
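To illustrate the kwargs-to-flags translation above, a hypothetical call (assuming the dynamic attribute dispatch to GitCommand used elsewhere in this file):
repo.log(n=3, pretty='%H', no_merges=True)
# runs: git log -n 3 --pretty %H --no-merges
# single-letter keys get a single dash, True values emit a bare flag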

View File

@@ -1,13 +1,14 @@
import collections.abc
import itertools
import json as json_
import json
import logging
import logging.handlers
import os
import pathlib
import pprint
import textwrap
import time
import unicodedata
from typing import Iterable, List, TypedDict, Literal
import requests
import werkzeug.urls
@@ -47,7 +48,47 @@ def _init_gh_logger():
if odoo.netsvc._logger_init:
_init_gh_logger()
GH_LOG_PATTERN = """=> {method} /{self._repo}/{path}{qs}{body}
SimpleUser = TypedDict('SimpleUser', {
'login': str,
'url': str,
'type': Literal['User', 'Organization'],
})
Authorship = TypedDict('Authorship', {
'name': str,
'email': str,
})
CommitTree = TypedDict('CommitTree', {
'sha': str,
'url': str,
})
Commit = TypedDict('Commit', {
'tree': CommitTree,
'url': str,
'message': str,
# optional when creating a commit
'author': Authorship,
'committer': Authorship,
'comments_count': int,
})
CommitLink = TypedDict('CommitLink', {
'html_url': str,
'sha': str,
'url': str,
})
PrCommit = TypedDict('PrCommit', {
'url': str,
'sha': str,
'commit': Commit,
# optional when creating a commit (in which case it uses the current user)
'author': SimpleUser,
'committer': SimpleUser,
'parents': List[CommitLink],
# not actually true but we're smuggling stuff via that key
'new_tree': str,
})
GH_LOG_PATTERN = """=> {method} {path}{qs}{body}
<= {r.status_code} {r.reason}
{headers}
@@ -58,11 +99,12 @@ class GH(object):
def __init__(self, token, repo):
self._url = 'https://api.github.com'
self._repo = repo
self._last_update = 0
session = self._session = requests.Session()
session.headers['Authorization'] = 'token {}'.format(token)
session.headers['Accept'] = 'application/vnd.github.symmetra-preview+json'
def _log_gh(self, logger, method, path, params, json, response, level=logging.INFO):
def _log_gh(self, logger: logging.Logger, response: requests.Response, level: int = logging.INFO, extra=None):
""" Logs a pair of request / response to github, to the specified
logger, at the specified level.
@@ -70,11 +112,14 @@ class GH(object):
bodies, at least in part) so we have as much information as possible
for post-mortems.
"""
body = body2 = ''
req = response.request
url = werkzeug.urls.url_parse(req.url)
if url.netloc != 'api.github.com':
return
if json:
body = '\n' + textwrap.indent('\t', pprint.pformat(json, indent=4))
body = '' if not req.body else ('\n' + pprint.pformat(json.loads(req.body.decode()), indent=4))
body2 = ''
if response.content:
if _is_json(response):
body2 = pprint.pformat(response.json(), depth=4)
@@ -87,41 +132,45 @@ class GH(object):
)
logger.log(level, GH_LOG_PATTERN.format(
self=self,
# requests data
method=method, path=path,
qs='' if not params else ('?' + werkzeug.urls.url_encode(params)),
body=utils.shorten(body.strip(), 400),
method=req.method, path=url.path, qs=url.query, body=body,
# response data
r=response,
headers='\n'.join(
'\t%s: %s' % (h, v) for h, v in response.headers.items()
),
body2=utils.shorten(body2.strip(), 400)
))
return body2
), extra=extra)
def __call__(self, method, path, params=None, json=None, check=True):
"""
:type check: bool | dict[int:Exception]
"""
if method.casefold() != 'get':
to_sleep = 1. - (time.time() - self._last_update)
if to_sleep > 0:
time.sleep(to_sleep)
path = f'/repos/{self._repo}/{path}'
r = self._session.request(method, self._url + path, params=params, json=json)
self._log_gh(_gh, method, path, params, json, r)
if method.casefold() != 'get':
self._last_update = time.time() + int(r.headers.get('Retry-After', 0))
self._log_gh(_gh, r)
if check:
if isinstance(check, collections.abc.Mapping):
exc = check.get(r.status_code)
if exc:
raise exc(r.text)
if r.status_code >= 400:
body = self._log_gh(
_logger, method, path, params, json, r, level=logging.ERROR)
if not isinstance(body, (bytes, str)):
raise requests.HTTPError(
json_.dumps(body, indent=4),
response=r
)
r.raise_for_status()
try:
if isinstance(check, collections.abc.Mapping):
exc = check.get(r.status_code)
if exc:
raise exc(r.text)
if r.status_code >= 400:
raise requests.HTTPError(r.text, response=r)
except Exception:
self._log_gh(_logger, r, level=logging.ERROR, extra={
'github-request-id': r.headers.get('x-github-request-id'),
})
raise
return r
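The mutation throttle added above is worth seeing in isolation; a minimal standalone sketch of the same policy (class name and wiring are illustrative, not the module's API):
import time
import requests
class MutationThrottle:
    """Space non-GET requests at least one second apart, pushed further
    out by any Retry-After the server sent back."""
    def __init__(self, session: requests.Session):
        self._session = session
        self._last_update = 0.0
    def request(self, method: str, url: str, **kw) -> requests.Response:
        if method.casefold() != 'get':
            to_sleep = 1.0 - (time.time() - self._last_update)
            if to_sleep > 0:
                time.sleep(to_sleep)
        r = self._session.request(method, url, **kw)
        if method.casefold() != 'get':
            self._last_update = time.time() + int(r.headers.get('Retry-After', 0))
        return r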
def user(self, username):
@@ -129,7 +178,7 @@ class GH(object):
r.raise_for_status()
return r.json()
def head(self, branch):
def head(self, branch: str) -> str:
d = utils.backoff(
lambda: self('get', 'git/refs/heads/{}'.format(branch)).json(),
exc=requests.HTTPError
@@ -180,13 +229,17 @@ class GH(object):
if r.status_code == 200:
head = r.json()['object']['sha']
else:
head = '<Response [%s]: %s)>' % (r.status_code, r.json() if _is_json(r) else r.text)
head = '<Response [%s]: %s)>' % (r.status_code, r.text)
if head == to:
_logger.debug("Sanity check ref update of %s to %s: ok", branch, to)
return
_logger.warning("Sanity check ref update of %s, expected %s got %s", branch, to, head)
_logger.warning(
"Sanity check ref update of %s, expected %s got %s (response-id %s)",
branch, to, head,
r.headers.get('x-github-request-id')
)
return head
def fast_forward(self, branch, sha):
@@ -200,7 +253,7 @@ class GH(object):
raise exceptions.FastForwardError(self._repo) \
from Exception("timeout: never saw %s" % sha)
except requests.HTTPError as e:
_logger.debug('fast_forward(%s, %s, %s) -> ERROR', self._repo, branch, sha, exc_info=True)
_logger.debug('fast_forward(%s, %s, %s) -> %s', self._repo, branch, sha, e)
if e.response.status_code == 422:
try:
r = e.response.json()
@@ -220,7 +273,7 @@ class GH(object):
status0 = r.status_code
_logger.debug(
'ref_set(%s, %s, %s -> %s (%s)',
'set_ref(%s, %s, %s -> %s (%s)',
self._repo, branch, sha, status0,
'OK' if status0 == 200 else r.text or r.reason
)
@@ -264,82 +317,6 @@ class GH(object):
f"Sanity check ref update of {branch}, expected {sha} got {head}"
return status
def merge(self, sha, dest, message):
r = self('post', 'merges', json={
'base': dest,
'head': sha,
'commit_message': message,
}, check={409: MergeError})
try:
r = r.json()
except Exception:
raise MergeError("Got non-JSON reponse from github: %s %s (%s)" % (r.status_code, r.reason, r.text))
_logger.debug(
"merge(%s, %s (%s), %s) -> %s",
self._repo, dest, r['parents'][0]['sha'],
shorten(message), r['sha']
)
return dict(r['commit'], sha=r['sha'], parents=r['parents'])
def rebase(self, pr, dest, reset=False, commits=None):
""" Rebase pr's commits on top of dest, updates dest unless ``reset``
is set.
Returns the hash of the rebased head and a map of all PR commits (to the commits they were rebased to)
"""
logger = _logger.getChild('rebase')
original_head = self.head(dest)
if commits is None:
commits = self.commits(pr)
logger.debug("rebasing %s, %s on %s (reset=%s, commits=%s)",
self._repo, pr, dest, reset, len(commits))
assert commits, "can't rebase a PR with no commits"
prev = original_head
for original in commits:
assert len(original['parents']) == 1, "can't rebase commits with more than one parent"
tmp_msg = 'temp rebasing PR %s (%s)' % (pr, original['sha'])
merged = self.merge(original['sha'], dest, tmp_msg)
# whichever parent is not original['sha'] should be what dest
# deref'd to, and we want to check that matches the "left parent" we
# expect (either original_head or the previously merged commit)
[base_commit] = (parent['sha'] for parent in merged['parents']
if parent['sha'] != original['sha'])
assert prev == base_commit,\
"Inconsistent view of %s between head (%s) and merge (%s)" % (
dest, prev, base_commit,
)
prev = merged['sha']
original['new_tree'] = merged['tree']['sha']
prev = original_head
mapping = {}
for c in commits:
committer = c['commit']['committer']
committer.pop('date')
copy = self('post', 'git/commits', json={
'message': c['commit']['message'],
'tree': c['new_tree'],
'parents': [prev],
'author': c['commit']['author'],
'committer': committer,
}, check={409: MergeError}).json()
logger.debug('copied %s to %s (parent: %s)', c['sha'], copy['sha'], prev)
prev = mapping[c['sha']] = copy['sha']
if reset:
self.set_ref(dest, original_head)
else:
self.set_ref(dest, prev)
logger.debug('rebased %s, %s on %s (reset=%s, commits=%s) -> %s',
self._repo, pr, dest, reset, len(commits),
prev)
# prev is updated after each copy so it's the rebased PR head
return prev, mapping
# fetch various bits of issues / prs to load them
def pr(self, number):
return (
@@ -361,14 +338,14 @@ class GH(object):
if not r.links.get('next'):
return
def commits_lazy(self, pr):
def commits_lazy(self, pr: int) -> Iterable[PrCommit]:
for page in itertools.count(1):
r = self('get', 'pulls/{}/commits'.format(pr), params={'page': page})
r = self('get', f'pulls/{pr}/commits', params={'page': page})
yield from r.json()
if not r.links.get('next'):
return
def commits(self, pr):
def commits(self, pr: int) -> List[PrCommit]:
""" Returns a PR's commits oldest first (that's what GH does &
is what we want)
"""

View File

@@ -1,6 +1,3 @@
import collections
def migrate(cr, version):
""" Status overrides: o2m -> m2m
"""

View File

@@ -0,0 +1,11 @@
""" Migration for the unified commands parser, fp_github fields moved from
forwardport to mergebot (one of them is removed but we might not care)
"""
def migrate(cr, version):
cr.execute("""
UPDATE ir_model_data
SET module = 'runbot_merge'
WHERE module = 'forwardport'
AND model = 'ir.model.fields'
AND name in ('fp_github_token', 'fp_github_name')
""")

View File

@@ -0,0 +1,124 @@
def move_fields(cr, *names):
cr.execute("""
UPDATE ir_model_data
SET module = 'runbot_merge'
WHERE module = 'forwardport'
AND model = 'runbot_merge_pull_requests'
AND name IN %s
""", [names])
def migrate(cr, version):
# cleanup some old crap
cr.execute("""
ALTER TABLE runbot_merge_project_freeze
DROP COLUMN IF EXISTS release_label,
DROP COLUMN IF EXISTS bump_label
""")
# fw constraint moved to mergebot, alongside all the fields it constrains
cr.execute("""
UPDATE ir_model_data
SET module = 'runbot_merge'
WHERE module = 'forwardport'
AND model = 'ir.model.constraint'
AND name = 'constraint_runbot_merge_pull_requests_fw_constraint'
""")
move_fields(
cr, 'merge_date', 'refname',
'limit_id', 'source_id', 'parent_id', 'root_id', 'forwardport_ids',
'detach_reason', 'fw_policy')
# view depends on pr.state, which prevents changing the state column's type
# we can just drop the view and it'll be recreated by the db update
cr.execute("DROP VIEW runbot_merge_freeze_labels")
# convert a few data types
cr.execute("""
CREATE TYPE runbot_merge_pull_requests_priority_type
AS ENUM ('default', 'priority', 'alone');
CREATE TYPE runbot_merge_pull_requests_state_type
AS ENUM ('opened', 'closed', 'validated', 'approved', 'ready', 'merged', 'error');
CREATE TYPE runbot_merge_pull_requests_merge_method_type
AS ENUM ('merge', 'rebase-merge', 'rebase-ff', 'squash');
CREATE TYPE runbot_merge_pull_requests_status_type
AS ENUM ('pending', 'failure', 'success');
ALTER TABLE runbot_merge_pull_requests
ALTER COLUMN priority
TYPE runbot_merge_pull_requests_priority_type
USING CASE WHEN priority = 0
THEN 'alone'
ELSE 'default'
END::runbot_merge_pull_requests_priority_type,
ALTER COLUMN state
TYPE runbot_merge_pull_requests_state_type
USING state::runbot_merge_pull_requests_state_type,
ALTER COLUMN merge_method
TYPE runbot_merge_pull_requests_merge_method_type
USING merge_method::runbot_merge_pull_requests_merge_method_type;
""")
cr.execute("""
ALTER TABLE runbot_merge_pull_requests
ADD COLUMN closed boolean not null default 'false',
ADD COLUMN error boolean not null default 'false',
ADD COLUMN skipchecks boolean not null default 'false',
ADD COLUMN cancel_staging boolean not null default 'false',
ADD COLUMN statuses text not null default '{}',
ADD COLUMN statuses_full text not null default '{}',
ADD COLUMN status runbot_merge_pull_requests_status_type not null default 'pending'
""")
# first pass: update all the new unconditional (or simple) fields
cr.execute("""
UPDATE runbot_merge_pull_requests p
SET closed = state = 'closed',
error = state = 'error',
skipchecks = priority = 'alone',
cancel_staging = priority = 'alone',
fw_policy = CASE fw_policy WHEN 'ci' THEN 'default' ELSE fw_policy END,
reviewed_by = CASE state
-- old version did not reset reviewer on PR update
WHEN 'opened' THEN NULL
WHEN 'validated' THEN NULL
-- if a PR predates the reviewed_by field, assign odoobot as reviewer
WHEN 'merged' THEN coalesce(reviewed_by, 2)
ELSE reviewed_by
END,
status = CASE state
WHEN 'validated' THEN 'success'
WHEN 'ready' THEN 'success'
WHEN 'merged' THEN 'success'
ELSE 'pending'
END::runbot_merge_pull_requests_status_type
""")
# the rest only gets updated if we have a matching commit which is not
# always the case
cr.execute("""
CREATE TEMPORARY TABLE parents ( id INTEGER not null, overrides jsonb not null );
WITH RECURSIVE parent_chain AS (
SELECT id, overrides::jsonb
FROM runbot_merge_pull_requests
WHERE parent_id IS NULL
UNION ALL
SELECT p.id, coalesce(pc.overrides || p.overrides::jsonb, pc.overrides, p.overrides::jsonb) as overrides
FROM runbot_merge_pull_requests p
JOIN parent_chain pc ON p.parent_id = pc.id
)
INSERT INTO parents SELECT * FROM parent_chain;
CREATE INDEX ON parents (id);
UPDATE runbot_merge_pull_requests p
SET statuses = jsonb_pretty(c.statuses::jsonb)::text,
statuses_full = jsonb_pretty(
c.statuses::jsonb
|| coalesce((select overrides from parents where id = p.parent_id), '{}')
|| overrides::jsonb
)::text
FROM runbot_merge_commit c
WHERE p.head = c.sha
""")

View File

@@ -0,0 +1,833 @@
"""This is definitely the giantest of fucks as pretty much the entire model was
reworked
"""
import dataclasses
import logging
from collections import defaultdict
from itertools import chain
from typing import TypeVar, Any
from psycopg2.extras import execute_batch, execute_values
from psycopg2.sql import SQL
logger = logging.getLogger("odoo.modules.migration.runbot_merge.15.0.1.12")
def cleanup(cr):
"""There seems to be some *pretty* weird database state having crept
"""
# Until 2021 (not sure why exactly) a bunch of batches were created with no
# PRs, some staged and some not.
logger.info("Delete batches without PRs...")
cr.execute("""
DELETE FROM runbot_merge_batch
WHERE id IN (
SELECT b.id
FROM runbot_merge_batch b
LEFT JOIN runbot_merge_batch_runbot_merge_pull_requests_rel r ON (b.id = r.runbot_merge_batch_id)
WHERE r.runbot_merge_batch_id IS NULL
)
""")
# some of the batches above were the only ones of their stagings
logger.info("Delete stagings without batches...")
cr.execute("""
DELETE FROM runbot_merge_stagings
WHERE id IN (
SELECT s.id
FROM runbot_merge_stagings s
LEFT JOIN runbot_merge_batch b ON (s.id = b.staging_id)
WHERE b.id IS NULL
)
""")
# check PRs whose source has a source
cr.execute("""
SELECT
p.id AS id,
s.id AS source_id,
r.name || '#' || p.number AS pr,
pr.name || '#' || pp.number AS parent,
sr.name || '#' || s.number AS source
FROM runbot_merge_pull_requests p
JOIN runbot_merge_repository r ON (r.id = p.repository)
JOIN runbot_merge_pull_requests pp ON (pp.id = p.source_id)
JOIN runbot_merge_repository pr ON (pr.id = pp.repository)
JOIN runbot_merge_pull_requests s ON (s.id = pp.source_id)
JOIN runbot_merge_repository sr ON (sr.id = s.repository)
ORDER BY p.id;
""")
for pid, ssid, _, _, _ in cr.fetchall():
cr.execute("UPDATE runbot_merge_pull_requests SET source_id = %s WHERE id = %s", [ssid, pid])
def hlink(url):
"""A terminal hlink starts with OSC8;{params};{link}ST and ends with the
sequence with no params or link
"""
return f'\x9d8;;{url}\x9c'
def link(label, url):
return f"{hlink(url)}{label}{hlink('')}"
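A hypothetical use of the helpers above, emitting a clickable terminal hyperlink (0x9d is the C1 OSC control byte, 0x9c is ST):
print(link('odoo#12345', 'https://github.com/odoo/odoo/pull/12345'))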
def batch_freezes(cr):
"""Old freezes were created batch-less but marked as merged, to make things
more consistent and avoid losing them for e.g. synthetic git histories,
associate then with synthetic successful stagings
"""
cr.execute("SELECT id FROM res_users WHERE login = 'moc@odoo.com'")
[uid] = cr.fetchone()
cr.execute("""
SELECT
array_agg(DISTINCT p.target) AS target,
array_agg(DISTINCT p.merge_date) AS merge_date,
json_object_agg(r.id, json_build_object(
'id', p.id,
'head', p.commits_map::json->''
)) AS prs
FROM runbot_merge_pull_requests p
JOIN runbot_merge_repository r ON (r.id = p.repository)
JOIN runbot_merge_branch t ON (t.id = p.target)
LEFT JOIN runbot_merge_batch_runbot_merge_pull_requests_rel bp ON (runbot_merge_pull_requests_id = p.id)
LEFT JOIN runbot_merge_batch b ON (runbot_merge_batch_id = b.id)
LEFT JOIN runbot_merge_stagings s ON (b.staging_id = s.id)
WHERE p.state = 'merged'
AND runbot_merge_pull_requests_id IS NULL
AND p.id != 1
GROUP BY label;
""")
freeze_batches = [
(target, merge_date, {int(r): p for r, p in prs.items()})
for [target], [merge_date], prs in cr._obj
]
stagings = []
for t, m, prs in freeze_batches:
# fetch the preceding successful staging on master
cr.execute("""
SELECT id
FROM runbot_merge_stagings
-- target 1 = master (so we want the last successful master staging before the freeze)
WHERE state = 'success' AND staged_at < %s AND target = 1
ORDER BY staged_at DESC
LIMIT 1
""", [m])
cr.execute("""
SELECT repository_id, commit_id
FROM runbot_merge_stagings_commits
WHERE staging_id = %s
""", cr.fetchone())
commits = dict(cr._obj)
cr.execute("""
INSERT INTO runbot_merge_stagings
(state, active, create_uid, write_uid, target, staged_at, create_date, write_date)
VALUES ('success', false, %s, %s, %s, %s, %s, %s)
RETURNING id
""", [uid, uid, t, m, m, m])
[[staging]] = cr.fetchall()
stagings.append(staging)
for repo, pr in prs.items():
if repo not in commits:
cr.execute("""
INSERT INTO runbot_merge_commit (sha) VALUES (%s)
ON CONFLICT (sha) DO UPDATE
SET to_check = runbot_merge_commit.to_check
RETURNING id
""", [pr['head']])
[cid] = cr.fetchone()
commits[repo] = cid
for repo, commit in commits.items():
cr.execute("""
INSERT INTO runbot_merge_stagings_commits
(staging_id, repository_id, commit_id)
VALUES (%s, %s, %s)
""", [staging, repo, commit])
cr.execute("""
INSERT INTO runbot_merge_stagings_heads
(staging_id, repository_id, commit_id)
VALUES (%s, %s, %s)
""", [staging, repo, commit])
batches = []
for staging, (_, date, _) in zip(stagings, freeze_batches):
cr.execute("""
INSERT INTO runbot_merge_batch
(create_uid, write_uid, staging_id, create_date, write_date)
VALUES (%s, %s, %s, %s, %s)
RETURNING id
""", [uid, uid, staging, date, date])
[[batch]] = cr.fetchall()
batches.append(batch)
for batch, (_, _, prs) in zip(batches, freeze_batches):
for pr in prs.values():
cr.execute("""
INSERT INTO runbot_merge_batch_runbot_merge_pull_requests_rel
(runbot_merge_batch_id, runbot_merge_pull_requests_id)
VALUES (%s, %s)
""", [batch, pr['id']])
def migrate(cr, version):
cr.execute("select from forwardport_batches")
assert not cr.rowcount, f"can't migrate the mergebot with enqueued forward ports (found {cr.rowcount})"
# avoid SQL taking absolutely ungodly amounts of time
cr.execute("SET statement_timeout = '60s'")
# will be recreated & computed on the fly
cr.execute("""
ALTER TABLE runbot_merge_batch
DROP COLUMN target,
DROP COLUMN active
""")
cleanup(cr)
batch_freezes(cr)
cr.execute("""
SELECT
source_name,
array_agg(json_build_array(gs.target, gs.prs) order by gs.seq desc)
FROM (
SELECT
rr.name || '#' || source.number as source_name,
t.sequence as seq,
t.name as target,
array_agg(json_build_array(r.name || '#' || p.number, p.state)) as prs
FROM runbot_merge_pull_requests p
JOIN runbot_merge_repository r ON (r.id = p.repository)
JOIN runbot_merge_branch t ON (t.id = p.target)
JOIN runbot_merge_pull_requests source ON (source.id = p.source_id)
JOIN runbot_merge_repository rr ON (rr.id = source.repository)
GROUP BY source.id, rr.id, t.id
HAVING count(*) FILTER (WHERE p.state = 'merged') > 1
) gs
GROUP BY source_name
""")
if cr.rowcount:
msg = "Found inconsistent batches, which will confuse later chaining\n\n"
for source, per_target in cr._obj:
msg += f"source {source}\n"
for target, prs in per_target:
msg += "\t{} {}\n".format(
target,
", ".join(f'{p} ({s})' for p, s in prs),
)
raise Exception(msg)
logger.info("add batch columns...")
cr.execute("""
CREATE TYPE runbot_merge_batch_priority
AS ENUM ('default', 'priority', 'alone');
ALTER TABLE runbot_merge_batch
-- backfilled from staging
ADD COLUMN merge_date timestamp,
-- backfilled from PRs
ADD COLUMN priority runbot_merge_batch_priority NOT NULL DEFAULT 'default',
ADD COLUMN skipchecks boolean NOT NULL DEFAULT false,
ADD COLUMN cancel_staging boolean NOT NULL DEFAULT false,
ADD COLUMN fw_policy varchar NOT NULL DEFAULT 'default'
;
""")
# batches not linked to stagings are likely to be useless
logger.info("add batch/staging join table...")
cr.execute("""
CREATE TABLE runbot_merge_staging_batch (
id serial PRIMARY KEY,
runbot_merge_batch_id integer NOT NULL REFERENCES runbot_merge_batch(id) ON DELETE CASCADE,
runbot_merge_stagings_id integer NOT NULL REFERENCES runbot_merge_stagings(id) ON DELETE CASCADE
);
CREATE UNIQUE INDEX runbot_merge_staging_batch_idx ON runbot_merge_staging_batch
(runbot_merge_stagings_id, runbot_merge_batch_id);
CREATE INDEX runbot_merge_staging_batch_rev ON runbot_merge_staging_batch
(runbot_merge_batch_id) INCLUDE (runbot_merge_stagings_id);
""")
# old 'bot creates a new batch at staging time, associated with that
# specific staging, the way to recoup them (to the best of our ability) is
# to assume a new style batch is a set of PRs, so if we group batches by prs
# we get more or less the set of relevant batches / stagings
logger.info("collect batches...")
clusters, to_batch = collate_real_batches(cr)
logger.info("collate batches...")
to_delete = []
batch_staging_links = []
to_rejoin = []
for cluster in clusters.clusters:
first = cluster.merged_batch or min(cluster.batches)
to_delete.extend(cluster.batches - {first})
# link all the PRs back to that batch
to_rejoin.append((first, list(cluster.prs)))
# link `first` to `staging`, ordering insertions by `batch` in order
# to conserve batching order
batch_staging_links.extend(
(batch, first, staging)
for batch, staging in cluster.stagings
)
logger.info("link batches to stagings...")
# sort (unique_batch, staging) by initial batch so that we create the new
# bits in the correct order hopefully
batch_staging_links.sort()
execute_values(
cr._obj,
"INSERT INTO runbot_merge_staging_batch (runbot_merge_batch_id, runbot_merge_stagings_id) VALUES %s",
((b, s) for _, b, s in batch_staging_links),
page_size=1000,
)
logger.info("detach PRs from \"active\" batches...")
# there are non-deactivated batches floating around, which are not linked
# to stagings, they seem linked to updates (forward-ported PRs getting
# updated), but not exclusively
cr.execute("UPDATE runbot_merge_pull_requests SET batch_id = NULL WHERE batch_id IS NOT NULL")
# drop constraint because pg checks it even though we've set all the active batches to null
cr.execute("ALTER TABLE runbot_merge_pull_requests DROP CONSTRAINT runbot_merge_pull_requests_batch_id_fkey")
while to_delete:
ds, to_delete = to_delete[:10000], to_delete[10000:]
logger.info("delete %d leftover batches", len(ds))
cr.execute("DELETE FROM runbot_merge_batch WHERE id = any(%s)", [ds])
logger.info("delete staging column...")
cr.execute("ALTER TABLE runbot_merge_batch DROP COLUMN staging_id;")
logger.info("relink PRs...")
cr.execute("DROP TABLE runbot_merge_batch_runbot_merge_pull_requests_rel")
execute_batch(
cr._obj,
"UPDATE runbot_merge_pull_requests SET batch_id = %s WHERE id = any(%s)",
to_rejoin,
page_size=1000,
)
# at this point all the surviving batches should have associated PRs
cr.execute("""
SELECT b.id
FROM runbot_merge_batch b
LEFT JOIN runbot_merge_pull_requests p ON p.batch_id = b.id
WHERE p IS NULL;
""")
if cr.rowcount:
logger.error(
"All batches should have at least one PR, found %d without",
cr.rowcount,
)
# the relinked batches are those from stagings, but that means merged PRs
# (or at least PRs we tried to merge), we also need batches for non-closed
# non-merged PRs
logger.info("collect unbatched PRs...")
cr.execute("""
SELECT
CASE
WHEN label SIMILAR TO '%%:patch-[[:digit:]]+'
THEN id::text
ELSE label
END as label_but_not,
array_agg(id),
array_agg(distinct target)
FROM runbot_merge_pull_requests
WHERE batch_id IS NULL AND id != all(%s)
GROUP BY label_but_not
""", [[pid for b in to_batch for pid in b]])
for _label, ids, targets in cr._obj:
# a few batches are nonsensical e.g. multiple PRs on different
# targets from the same branch or a mix of master upgrade and stable
# branch community, split them out
if len(targets) > 1:
to_batch.extend([id] for id in ids)
else:
to_batch.append(ids)
logger.info("create %d new batches for unbatched prs...", len(to_batch))
cr.execute(
SQL("INSERT INTO runbot_merge_batch VALUES {} RETURNING id").format(
SQL(", ").join([SQL("(DEFAULT)")]*len(to_batch))))
logger.info("link unbatched PRs to batches...")
execute_batch(
cr._obj,
"UPDATE runbot_merge_pull_requests SET batch_id = %s WHERE id = any(%s)",
[(batch_id, ids) for ids, [batch_id] in zip(to_batch, cr.fetchall())],
page_size=1000,
)
cr.execute("SELECT state, count(*) FROM runbot_merge_pull_requests WHERE batch_id IS NULL GROUP BY state")
if cr.rowcount:
prs = cr.fetchall()
logger.error(
"Found %d PRs without a batch:%s",
sum(c for _, c in prs),
"".join(
f"\n\t- {c} {p!r} PRs"
for p, c in prs
),
)
logger.info("move pr data to batches...")
cr.execute("""
UPDATE runbot_merge_batch b
SET merge_date = v.merge_date,
priority = v.p::varchar::runbot_merge_batch_priority,
skipchecks = v.skipchecks,
cancel_staging = v.cancel_staging,
fw_policy = case when v.skipci
THEN 'skipci'
ELSE 'default'
END
FROM (
SELECT
batch_id as id,
max(priority) as p,
min(merge_date) as merge_date,
-- added to PRs in 1.11 so can be aggregated & copied over
bool_or(skipchecks) as skipchecks,
bool_or(cancel_staging) as cancel_staging,
bool_or(fw_policy = 'skipci') as skipci
FROM runbot_merge_pull_requests
GROUP BY batch_id
) v
WHERE b.id = v.id
""")
logger.info("restore batch constraint...")
cr.execute("""
ALTER TABLE runbot_merge_pull_requests
ADD CONSTRAINT runbot_merge_pull_requests_batch_id_fkey
FOREIGN KEY (batch_id)
REFERENCES runbot_merge_batch (id)
""")
# remove xid for x_prs (not sure why it exists)
cr.execute("""
DELETE FROM ir_model_data
WHERE module = 'forwardport'
AND name = 'field_forwardport_batches__x_prs'
""")
# update (x_)prs to match the updated field type(s)
cr.execute("""
UPDATE ir_model_fields
SET ttype = 'one2many',
relation = 'runbot_merge.pull_requests',
relation_field = 'batch_id'
WHERE model_id = 445 AND name = 'prs';
UPDATE ir_model_fields
SET ttype = 'one2many'
WHERE model_id = 448 AND name = 'x_prs';
""")
logger.info("generate batch parenting...")
cr.execute("SELECT id, project_id, name FROM runbot_merge_branch ORDER BY project_id, sequence, name")
# branch_id -> str
branch_names = {}
# branch_id -> project_id
projects = {}
# project_id -> list[branch_id]
branches_for_project = {}
for bid, pid, name in cr._obj:
branch_names[bid] = name
projects[bid] = pid
branches_for_project.setdefault(pid, []).append(bid)
cr.execute("""
SELECT batch_id,
array_agg(distinct target),
array_agg(json_build_object(
'id', p.id,
'name', r.name || '#' || number,
'repo', r.name,
'number', number,
'state', p.state,
'source', source_id
))
FROM runbot_merge_pull_requests p
JOIN runbot_merge_repository r ON (r.id = p.repository)
GROUP BY batch_id
""")
todos = []
descendants = defaultdict(list)
targets = {}
batches = {}
batch_prs = {}
for batch, target_ids, prs in cr._obj:
assert len(target_ids) == 1, \
"Found batch with multiple targets {tnames} {prs}".format(
tnames=', '.join(branch_names[id] for id in target_ids),
prs=prs,
)
todos.append((batch, target_ids[0], prs))
batch_prs[batch] = prs
for pr in prs:
pr['link'] = link(pr['name'], "https://mergebot.odoo.com/{repo}/pull/{number}".format_map(pr))
targets[pr['id']] = target_ids[0]
batches[pr['id']] = batch
batches[pr['name']] = batch
if pr['source']:
descendants[pr['source']].append(pr['id'])
else:
# put source PRs as their own descendants otherwise the linkage
# fails when trying to find the top-most parent
descendants[pr['id']].append(pr['id'])
assert None not in descendants
for prs in chain(
KNOWN_BATCHES,
chain.from_iterable(WEIRD_SEQUENCES),
):
batch_of_prs = {batches[f'odoo/{p}'] for p in prs}
assert len(batch_of_prs) == 1,\
"assumed {prs} were the same batch, got {batch_of_prs}".format(
prs=', '.join(prs),
batch_of_prs='; '.join(
'{} => {}'.format(p, batches[f'odoo/{p}'])
for p in prs
)
)
prs_of_batch = {pr['name'].removeprefix('odoo/') for pr in batch_prs[batch_of_prs.pop()]}
assert set(prs) == prs_of_batch,\
"assumed batch would contain {prs}, got {prs_of_batch}".format(
prs=', '.join(prs),
prs_of_batch=', '.join(prs_of_batch),
)
parenting = []
for batch, target, prs in todos:
sources = [p['source'] for p in prs if p['source']]
# can't have parent batch without source PRs
if not sources:
continue
pid = projects[target]
branches = branches_for_project[pid]
# we need all the preceding targets in order to jump over disabled branches
previous_targets = branches[branches.index(target) + 1:]
if not previous_targets:
continue
for previous_target in previous_targets:
# from each source, find the descendant targeting the earlier target,
# then get the batch of these PRs
parents = {
batches[descendant]
for source in sources
for descendant in descendants[source]
if targets[descendant] == previous_target
}
if parents:
break
else:
continue
if len(parents) == 2:
parents1, parents2 = [batch_prs[parent] for parent in parents]
# if all of one parent are merged and all of the other are not, take the merged side
if all(p['state'] == 'merged' for p in parents1) and all(p['state'] != 'merged' for p in parents2):
parents = [list(parents)[0]]
elif all(p['state'] != 'merged' for p in parents1) and all(p['state'] == 'merged' for p in parents2):
parents = [list(parents)[1]]
elif len(parents1) == 1 and len(parents2) == 1 and len(prs) == 1:
# if one of the candidates is older than the current PR
# (lower id) and the other one younger, assume the first one is
# correct
p = min(parents, key=lambda p: batch_prs[p][0]['id'])
low = batch_prs[p]
high = batch_prs[max(parents, key=lambda p: batch_prs[p][0]['id'])]
if low[0]['id'] < prs[0]['id'] < high[0]['id']:
parents = [p]
if real_parents := SAAS_135_INSERTION_CONFUSION.get(tuple(sorted(parents))):
parents = real_parents
assert len(parents) == 1,\
("Found multiple candidates for batch {batch} ({prs})"
" with target {target} (previous={previous_target})\n\t{parents}".format(
parents="\n\t".join(
"{} ({})".format(
parent,
", ".join(
f"{p['link']} ({p['state']}, {branch_names[targets[p['id']]]})"
for p in batch_prs[parent]
)
)
for parent in parents
),
batch=batch,
target=branch_names[target],
previous_target=branch_names[previous_target],
prs=', '.join(map("{link} ({state})".format_map, prs)),
))
parenting.append((parents.pop(), batch))
logger.info("set batch parenting...")
# add column down here otherwise the FK constraint has to be verified for
# each batch we try to delete and that is horrendously slow, deferring the
# constraints is not awesome because we need to check it at the first DDL
# and that's still way slower than feels necessary
cr.execute("""
ALTER TABLE runbot_merge_batch
ADD COLUMN parent_id integer
REFERENCES runbot_merge_batch(id)
""")
execute_batch(
cr._obj,
"UPDATE runbot_merge_batch SET parent_id = %s WHERE id = %s",
parenting,
page_size=1000,
)
@dataclasses.dataclass(slots=True, kw_only=True)
class Cluster:
merged_batch: int | None = None
prs: set[int] = dataclasses.field(default_factory=set)
batches: set[int] = dataclasses.field(default_factory=set)
stagings: set[tuple[int, int]] = dataclasses.field(default_factory=set)
"set of original (batch, staging) pairs"
@dataclasses.dataclass
class Clusters:
clusters: list[Cluster] = dataclasses.field(default_factory=list)
by_batch: dict[int, Cluster] = dataclasses.field(default_factory=dict)
by_pr: dict[int, Cluster] = dataclasses.field(default_factory=dict)
@dataclasses.dataclass(slots=True, kw_only=True)
class Batch:
staging: int | None = None
merged: bool = False
prs: set[int] = dataclasses.field(default_factory=set)
T = TypeVar('T')
def insert(s: set[T], v: T) -> bool:
"""Inserts v in s if not in, and returns whether an insertion was needed.
"""
if v in s:
return False
else:
s.add(v)
return True
def collate_real_batches(cr: Any) -> tuple[Clusters, list[list[int]]]:
cr.execute('''
SELECT
st.id as staging,
st.state as staging_state,
b.id as batch_id,
p.id as pr_id
FROM runbot_merge_batch_runbot_merge_pull_requests_rel br
JOIN runbot_merge_batch b ON (b.id = br.runbot_merge_batch_id)
JOIN runbot_merge_pull_requests as p ON (p.id = br.runbot_merge_pull_requests_id)
LEFT JOIN runbot_merge_stagings st ON (st.id = b.staging_id)
''')
batch_map: dict[int, Batch] = {}
pr_to_batches = defaultdict(set)
for staging_id, staging_state, batch_id, pr_id in cr.fetchall():
pr_to_batches[pr_id].add(batch_id)
if batch := batch_map.get(batch_id):
batch.prs.add(pr_id)
else:
batch_map[batch_id] = Batch(
staging=staging_id,
merged=staging_state == 'success',
prs={pr_id},
)
# maps a PR name to its id
cr.execute("""
SELECT r.name || '#' || p.number, p.id
FROM runbot_merge_pull_requests p
JOIN runbot_merge_repository r ON (r.id = p.repository)
WHERE r.name || '#' || p.number = any(%s)
""", [[f'odoo/{p}' for seq in WEIRD_SEQUENCES for b in seq if len(b) > 1 for p in b]])
prmap: dict[str, int] = dict(cr._obj)
to_batch = []
# for each WEIRD_SEQUENCES batch, we need to merge their batches if any,
# and create them otherwise
for batch in (b for seq in WEIRD_SEQUENCES for b in seq if len(b) > 1):
ids = [prmap[f'odoo/{n}'] for n in batch]
batches = {b for pid in ids for b in pr_to_batches[pid]}
if batches:
for pid in ids:
pr_to_batches[pid].update(batches)
for bid in batches:
batch_map[bid].prs.update(ids)
else:
# need to create a new batch
to_batch.append(ids)
clusters = Clusters()
# we can start from either the PR or the batch side to reconstruct a cluster
for pr_id in pr_to_batches:
if pr_id in clusters.by_pr:
continue
to_visit = [pr_id]
prs: set[int] = set()
merged_batch = None
batches: set[int] = set()
stagings: set[tuple[int, int]] = set()
while to_visit:
pr_id = to_visit.pop()
if not insert(prs, pr_id):
continue
for batch_id in pr_to_batches[pr_id]:
if not insert(batches, batch_id):
continue
b = batch_map[batch_id]
if s := b.staging:
stagings.add((batch_id, s))
if b.merged:
merged_batch = batch_id
to_visit.extend(b.prs - prs)
c = Cluster(merged_batch=merged_batch, prs=prs, batches=batches, stagings=stagings)
clusters.clusters.append(c)
clusters.by_batch.update((batch_id, c) for batch_id in c.batches)
clusters.by_pr.update((pr_id, c) for pr_id in c.prs)
return clusters, to_batch
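The cluster reconstruction above is essentially a connected-components walk over the bipartite PR/batch graph; the same traversal in isolation (edge data is made up):
from collections import defaultdict
pr_to_batches = {1: {10}, 2: {10, 11}, 3: {12}}
batch_to_prs = defaultdict(set)
for pr, bs in pr_to_batches.items():
    for b in bs:
        batch_to_prs[b].add(pr)
seen = set()
components = []
for start in pr_to_batches:
    if start in seen:
        continue
    prs, batches, stack = set(), set(), [start]
    while stack:
        pr = stack.pop()
        if pr in seen:
            continue
        seen.add(pr)
        prs.add(pr)
        for b in pr_to_batches[pr]:
            if b not in batches:
                batches.add(b)
                stack.extend(batch_to_prs[b] - prs)
    components.append((prs, batches))
assert components == [({1, 2}, {10, 11}), ({3}, {12})]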
# at the creation of saas 13.5, the forwardbot clearly got very confused and
# somehow did not correctly link the PRs it reinserted together, leading to
# some of them being merged separately, leading the batch parenting linker thing
# to be extremely confused
SAAS_135_INSERTION_CONFUSION = {
(48200, 48237): [48237],
(48353, 48388): [48353],
(48571, 48602): [48602],
(73614, 73841): [73614],
}
KNOWN_BATCHES = [
# both closed, same source (should be trivial)
["odoo#151827", "enterprise#55453"],
["odoo#66743", "enterprise#16631"],
# both closed but different sources
["odoo#57659", "enterprise#13204"],
["odoo#57752", "enterprise#13238"],
["odoo#94152", "enterprise#28664"],
["odoo#114059", "enterprise#37690"],
["odoo#152904", "enterprise#55975"],
# one closed the other not, different sources (so a PR was added in the
# middle of a forward port then its descendant was closed even though the
# other repo / sequence kept on keeping)
["odoo#113422", "enterprise#37429"],
["odoo#151992", "enterprise#55501"],
["odoo#159211", "enterprise#59407"],
# closed without a sibling but their source had a sibling
["odoo#67727"], # enterprise closed at enterprise#16631
["odoo#70828"], # enterprise closed at enterprise#17901
["odoo#132817"], # enterprise closed at enterprise#44656
["odoo#137855"], # enterprise closed at enterprise#48092
["enterprise#49430"], # odoo closed at odoo#139515
["odoo#109811", "enterprise#35966"],
["odoo#110311", "enterprise#35983"],
["odoo#110576"],
]
# This is next level weird compared to the previous so it gets extra care:
# these are sequences with multiple points of divergence or grafting
WEIRD_SEQUENCES = [
[
["odoo#40466"],
["odoo#40607"],
["odoo#40613", "odoo#41106"],
["odoo#40615", "odoo#41112"],
["odoo#40627", "odoo#41116", "odoo#41163"],
["odoo#40638", "odoo#41119", "odoo#41165"],
],
[
["odoo#46405"],
["odoo#46698"],
["odoo#46820"],
["odoo#46974"],
["odoo#47273"],
["odoo#47345", "enterprise#9259"],
["odoo#47349", "odoo#47724", "enterprise#9274"],
],
[
["odoo#47923"],
["odoo#47986"],
["odoo#47991", "odoo#48010"],
["odoo#47996", "odoo#48015", "odoo#48016"],
["odoo#48003"],
],
[
["enterprise#9996"],
["enterprise#10062", "odoo#49828"],
["enterprise#10065", "odoo#49852", "enterprise#10076"],
["enterprise#10173", "odoo#50087"],
["enterprise#10179", "odoo#50104"],
["enterprise#10181", "odoo#50110"],
],
[
["enterprise#16357"],
["enterprise#16371"],
["enterprise#16375", "enterprise#16381"],
["enterprise#16378", "enterprise#16385"],
["enterprise#16379", "enterprise#16390"],
],
[
["odoo#55112"],
["odoo#55120"],
["odoo#55123", "odoo#55159"],
["odoo#55128", "odoo#55169"],
["odoo#55135", "odoo#55171"],
["odoo#55140", "odoo#55172"],
],
[
["odoo#56254", "enterprise#12558"],
["odoo#56294", "enterprise#12564"],
["odoo#56300", "enterprise#12566"],
["odoo#56340", "enterprise#12589", "enterprise#12604"],
["odoo#56391", "enterprise#12608"],
],
[
["enterprise#12565", "odoo#56299"],
["enterprise#12572", "odoo#56309", "odoo#56494"],
["enterprise#12660", "odoo#56518"],
["enterprise#12688", "odoo#56581"],
["enterprise#12691"],
],
[
["odoo#64706"],
["odoo#65275"],
["odoo#65279", "odoo#65405"],
["odoo#65489", "odoo#65491"],
],
[
["odoo#66176"],
["odoo#66188"],
["odoo#66191"],
["odoo#66194", "odoo#66226"],
["odoo#66200", "odoo#66229", "odoo#66277"],
["odoo#66204", "odoo#66232", "odoo#66283"],
["odoo#66208", "odoo#66234", "odoo#66285", "odoo#66303"],
],
[
["enterprise#22089", "odoo#79348"],
["enterprise#26736", "odoo#90050"],
["enterprise#31822", "odoo#101218", "odoo#106002"],
["enterprise#36014", "odoo#110369", "odoo#113892"],
["enterprise#37690", "odoo#114059"],
],
]

View File

@@ -0,0 +1,4 @@
def migrate(cr, version):
cr.execute("ALTER TABLE runbot_merge_stagings "
"ADD COLUMN staging_end timestamp without time zone")
cr.execute("UPDATE runbot_merge_stagings SET staging_end = write_date")

View File

@@ -0,0 +1,12 @@
def migrate(cr, version):
cr.execute("""
CREATE TABLE runbot_merge_events_sources (
id serial primary key,
repository varchar not null,
secret varchar
);
INSERT INTO runbot_merge_events_sources (repository, secret)
SELECT r.name, p.secret
FROM runbot_merge_repository r
JOIN runbot_merge_project p ON p.id = r.project_id;
""")

View File

@@ -0,0 +1,22 @@
"""Completely missed that in 44084e303ccece3cb54128ab29eab399bd4d24e9 I
completely changed the semantics and structure of the statuses_cache, so the
old caches don't actually work anymore at all.
This rewrites all existing caches.
"""
def migrate(cr, version):
cr.execute("""
WITH statuses AS (
SELECT
s.id as staging_id,
json_object_agg(c.sha, c.statuses::json) as statuses
FROM runbot_merge_stagings s
LEFT JOIN runbot_merge_stagings_heads h ON (h.staging_id = s.id)
LEFT JOIN runbot_merge_commit c ON (h.commit_id = c.id)
GROUP BY s.id
)
UPDATE runbot_merge_stagings
SET statuses_cache = statuses
FROM statuses
WHERE id = staging_id
""")

View File

@@ -0,0 +1,6 @@
from pathlib import Path
def migrate(cr, version):
sql = Path(__file__).parent.joinpath('upgrade.sql')\
.read_text(encoding='utf-8')
cr.execute(sql)

View File

@@ -0,0 +1,62 @@
CREATE TABLE runbot_merge_stagings_commits (
id serial NOT NULL,
staging_id integer not null references runbot_merge_stagings (id),
commit_id integer not null references runbot_merge_commit (id),
repository_id integer not null references runbot_merge_repository (id)
);
CREATE TABLE runbot_merge_stagings_heads (
id serial NOT NULL,
staging_id integer NOT NULL REFERENCES runbot_merge_stagings (id),
commit_id integer NOT NULL REFERENCES runbot_merge_commit (id),
repository_id integer NOT NULL REFERENCES runbot_merge_repository (id)
);
-- some of the older stagings only have the head, not the commit,
-- add the commit
UPDATE runbot_merge_stagings
SET heads = heads::jsonb || jsonb_build_object(
'odoo/odoo^', heads::json->'odoo/odoo',
'odoo/enterprise^', heads::json->'odoo/enterprise'
)
WHERE heads NOT ILIKE '%^%';
-- some of the stagings have heads which don't exist in the commits table,
-- because they never got a status from the runbot...
-- create fake commits so we don't lose heads
INSERT INTO runbot_merge_commit (sha, statuses, create_uid, create_date, write_uid, write_date)
SELECT r.value, '{}', s.create_uid, s.create_date, s.create_uid, s.create_date
FROM runbot_merge_stagings s,
json_each_text(s.heads::json) r
ON CONFLICT DO NOTHING;
CREATE TEMPORARY TABLE staging_commits (
id integer NOT NULL,
repo integer NOT NULL,
-- the staging head (may be a dedup, may be the same as commit)
head integer NOT NULL,
-- the staged commit
commit integer NOT NULL
);
-- the splatting works entirely off of the staged head
-- (the one without the ^ suffix), we concat the `^` to get the corresponding
-- merge head (the actual commit to push to the branch)
INSERT INTO staging_commits (id, repo, head, commit)
SELECT s.id, re.id AS repo, h.id AS head, c.id AS commit
FROM runbot_merge_stagings s,
json_each_text(s.heads::json) r,
runbot_merge_commit h,
runbot_merge_commit c,
runbot_merge_repository re
WHERE r.key NOT ILIKE '%^'
AND re.name = r.key
AND h.sha = r.value
AND c.sha = s.heads::json->>(r.key || '^');
INSERT INTO runbot_merge_stagings_heads (staging_id, repository_id, commit_id)
SELECT id, repo, head FROM staging_commits;
INSERT INTO runbot_merge_stagings_commits (staging_id, repository_id, commit_id)
SELECT id, repo, commit FROM staging_commits;
ALTER TABLE runbot_merge_stagings DROP COLUMN heads;
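To clarify the `^` convention the script above splits on, the legacy heads JSON stored two entries per repository (shas are made up):
heads = {
    "odoo/odoo": "aaa111",   # staging head, possibly a deduplication commit
    "odoo/odoo^": "bbb222",  # merge head: the actual staged commit
}
# aaa111 ends up in runbot_merge_stagings_heads,
# bbb222 in runbot_merge_stagings_commits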

View File

@@ -0,0 +1,32 @@
from psycopg2.extras import execute_values
def migrate(cr, version):
# Drop all legacy style "previous failures": this is for PRs
# several years old so almost certainly long irrelevant, and it
# allows removing the workaround for them. Legacy style has the
# `state`, `target`, `description` keys at the toplevel while new
# style is like commit statuses, with the contexts at the toplevel
# and the status info below.
cr.execute("""
UPDATE runbot_merge_pull_requests
SET previous_failure = '{}'
WHERE previous_failure::jsonb ? 'state'
""")
cr.execute("""
WITH new_statuses (id, statuses) AS (
SELECT id, json_object_agg(
key,
CASE WHEN jsonb_typeof(value) = 'string'
THEN jsonb_build_object('state', value, 'target_url', null, 'description', null)
ELSE value
END
) AS statuses
FROM runbot_merge_commit
CROSS JOIN LATERAL jsonb_each(statuses::jsonb) s
WHERE jsonb_path_match(statuses::jsonb, '$.*.type() != "object"')
GROUP BY id
)
UPDATE runbot_merge_commit SET statuses = new_statuses.statuses FROM new_statuses WHERE runbot_merge_commit.id = new_statuses.id
""")

View File

@@ -1,6 +1,11 @@
from . import mail_thread
from . import ir_actions
from . import res_partner
from . import project
from . import pull_requests
from . import batch
from . import project_freeze
from . import stagings_create
from . import staging_cancel
from . import events_sources
from . import crons

View File

@@ -0,0 +1,537 @@
from __future__ import annotations
import base64
import contextlib
import logging
import os
import re
from collections import defaultdict
from collections.abc import Iterator
import requests
from psycopg2 import sql
from odoo import models, fields, api
from .utils import enum
from .. import git
_logger = logging.getLogger(__name__)
FOOTER = '\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n'
class StagingBatch(models.Model):
_name = 'runbot_merge.staging.batch'
_description = "link between batches and staging in order to maintain an " \
"ordering relationship between the batches of a staging"
_log_access = False
_order = 'id'
runbot_merge_batch_id = fields.Many2one('runbot_merge.batch', required=True)
runbot_merge_stagings_id = fields.Many2one('runbot_merge.stagings', required=True)
def init(self):
super().init()
self.env.cr.execute(sql.SQL("""
CREATE UNIQUE INDEX IF NOT EXISTS runbot_merge_staging_batch_idx
ON {table} (runbot_merge_stagings_id, runbot_merge_batch_id);
CREATE INDEX IF NOT EXISTS runbot_merge_staging_batch_rev
ON {table} (runbot_merge_batch_id) INCLUDE (runbot_merge_stagings_id);
""").format(table=sql.Identifier(self._table)))
class Batch(models.Model):
""" A batch is a "horizontal" grouping of *codependent* PRs: PRs with
the same label & target but for different repositories. These are
assumed to be part of the same "change" smeared over multiple
repositories e.g. change an API in repo1, this breaks use of that API
in repo2 which now needs to be updated.
"""
_name = 'runbot_merge.batch'
_description = "batch of pull request"
_inherit = ['mail.thread']
_parent_store = True
_order = "id desc"
name = fields.Char(compute="_compute_name", search="_search_name")
target = fields.Many2one('runbot_merge.branch', store=True, compute='_compute_target')
batch_staging_ids = fields.One2many('runbot_merge.staging.batch', 'runbot_merge_batch_id')
staging_ids = fields.Many2many(
'runbot_merge.stagings',
compute="_compute_staging_ids",
context={'active_test': False},
)
split_id = fields.Many2one('runbot_merge.split', index=True)
all_prs = fields.One2many('runbot_merge.pull_requests', 'batch_id')
prs = fields.One2many('runbot_merge.pull_requests', compute='_compute_open_prs', search='_search_open_prs')
active = fields.Boolean(compute='_compute_active', store=True, help="closed batches (batches containing only closed PRs)")
fw_policy = fields.Selection([
('no', "Do not port forward"),
('default', "Default"),
('skipci', "Skip CI"),
], required=True, default="default", string="Forward Port Policy", tracking=True)
merge_date = fields.Datetime(tracking=True)
# having skipchecks skip both validation *and approval* makes sense because
# it's batch-wise, having to approve individual PRs is annoying
skipchecks = fields.Boolean(
string="Skips Checks",
default=False, tracking=True,
help="Forces entire batch to be ready, skips validation and approval",
)
cancel_staging = fields.Boolean(
string="Cancels Stagings",
default=False, tracking=True,
help="Cancels current staging on target branch when becoming ready"
)
priority = fields.Selection([
('default', "Default"),
('priority', "Priority"),
('alone', "Alone"),
], default='default', group_operator=None, required=True, tracking=True,
column_type=enum(_name, 'priority'),
)
blocked = fields.Char(store=True, compute="_compute_blocked")
# unlike on PRs, this does not get detached... ? (because batches can be
# partially detached so that's a PR-level concern)
parent_path = fields.Char(index=True, unaccent=False)
parent_id = fields.Many2one("runbot_merge.batch")
genealogy_ids = fields.Many2many(
"runbot_merge.batch",
compute="_compute_genealogy",
context={"active_test": False},
)
@api.depends('batch_staging_ids.runbot_merge_stagings_id')
def _compute_staging_ids(self):
for batch in self:
batch.staging_ids = batch.batch_staging_ids.runbot_merge_stagings_id
@property
def source(self):
return self.browse(map(int, self.parent_path.split('/', 1)[:1]))
def descendants(self, include_self: bool = False) -> Iterator[Batch]:
# in DB both will prefix-match on the literal prefix then apply a
# trivial filter (even though the filter is technically unnecessary for
# the first form), doing it like this means we don't have to `- self`
# in the ``not include_self`` case
if include_self:
pattern = self.parent_path + '%'
else:
pattern = self.parent_path + '_%'
act = self.env.context.get('active_test', True)
return self\
.with_context(active_test=False)\
.search([("parent_path", '=like', pattern)], order="parent_path")\
.with_context(active_test=act)
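# To illustrate the prefix matching: _parent_store materializes parent_path
# as "root/.../self/", so a hypothetical chain 12 -> 34 -> 56 stores "12/",
# "12/34/" and "12/34/56/". From batch 34, "12/34/%" matches itself and 56
# (include_self) while "12/34/_%" requires at least one extra character and
# thus skips 34.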
# also depends on all the descendants of the source or sth
@api.depends('parent_path')
def _compute_genealogy(self):
for batch in self:
sid = next(iter(batch.parent_path.split('/', 1)))
batch.genealogy_ids = self \
.with_context(active_test=False)\
.search([("parent_path", "=like", f"{sid}/%")], order="parent_path")\
def _auto_init(self):
for field in self._fields.values():
if not isinstance(field, fields.Selection) or field.column_type[0] == 'varchar':
continue
t = field.column_type[1]
self.env.cr.execute("SELECT FROM pg_type WHERE typname = %s", [t])
if not self.env.cr.rowcount:
self.env.cr.execute(
f"CREATE TYPE {t} AS ENUM %s",
[tuple(s for s, _ in field.selection)]
)
super()._auto_init()
self.env.cr.execute("""
CREATE INDEX IF NOT EXISTS runbot_merge_batch_ready_idx
ON runbot_merge_batch (target, priority)
WHERE blocked IS NULL;
CREATE INDEX IF NOT EXISTS runbot_merge_batch_parent_id_idx
ON runbot_merge_batch (parent_id)
WHERE parent_id IS NOT NULL;
""")
@api.depends('all_prs.closed')
def _compute_active(self):
for b in self:
b.active = not all(p.closed for p in b.all_prs)
@api.depends('all_prs.closed')
def _compute_open_prs(self):
for b in self:
b.prs = b.all_prs.filtered(lambda p: not p.closed)
def _search_open_prs(self, operator, value):
return [('all_prs', operator, value), ('active', '=', True)]
@api.depends("prs.label")
def _compute_name(self):
for batch in self:
batch.name = batch.prs[:1].label or batch.all_prs[:1].label
def _search_name(self, operator, value):
return [('all_prs.label', operator, value)]
@api.depends("all_prs.target", "all_prs.closed")
def _compute_target(self):
for batch in self:
targets = batch.prs.mapped('target') or batch.all_prs.mapped('target')
batch.target = targets if len(targets) == 1 else False
@api.depends(
"merge_date",
"prs.error", "prs.draft", "prs.squash", "prs.merge_method",
"skipchecks",
"prs.status", "prs.reviewed_by", "prs.target",
)
def _compute_blocked(self):
for batch in self:
if batch.merge_date:
batch.blocked = "Merged."
elif not batch.active:
batch.blocked = "all prs are closed"
elif len(targets := batch.prs.mapped('target')) > 1:
batch.blocked = f"Multiple target branches: {', '.join(targets.mapped('name'))!r}"
elif blocking := batch.prs.filtered(
lambda p: p.error or p.draft or not (p.squash or p.merge_method)
):
batch.blocked = "Pull request(s) %s blocked." % ', '.join(blocking.mapped('display_name'))
elif not batch.skipchecks and (unready := batch.prs.filtered(
lambda p: not (p.reviewed_by and p.status == "success")
)):
unreviewed = ', '.join(unready.filtered(lambda p: not p.reviewed_by).mapped('display_name'))
unvalidated = ', '.join(unready.filtered(lambda p: p.status == 'pending').mapped('display_name'))
failed = ', '.join(unready.filtered(lambda p: p.status == 'failure').mapped('display_name'))
batch.blocked = "Pull request(s) %s." % ', '.join(filter(None, [
unreviewed and f"{unreviewed} are waiting for review",
unvalidated and f"{unvalidated} are waiting for CI",
failed and f"{failed} have failed CI",
]))
else:
if batch.blocked:
self.env.ref("runbot_merge.staging_cron")._trigger()
if batch.cancel_staging:
if splits := batch.target.split_ids:
splits.unlink()
batch.target.active_staging_id.cancel(
'unstaged by %s becoming ready',
', '.join(batch.prs.mapped('display_name')),
)
batch.blocked = False
def _port_forward(self):
if not self:
return
proj = self.target.project_id
if not proj.fp_github_token:
_logger.warning(
"Can not forward-port %s (%s): no token on project %s",
self,
', '.join(self.prs.mapped('display_name')),
proj.name
)
return
notarget = [r.name for r in self.prs.repository if not r.fp_remote_target]
if notarget:
_logger.error(
"Can not forward-port %s (%s): repos %s don't have a forward port remote configured",
self,
', '.join(self.prs.mapped('display_name')),
', '.join(notarget),
)
return
all_sources = [(p.source_id or p) for p in self.prs]
all_targets = [p._find_next_target() for p in self.prs]
if all(t is None for t in all_targets):
# TODO: maybe add a feedback message?
_logger.info(
"Will not forward port %s (%s): no next target",
self,
', '.join(self.prs.mapped('display_name'))
)
return
PRs = self.env['runbot_merge.pull_requests']
targets = defaultdict(lambda: PRs)
for p, t in zip(self.prs, all_targets):
if t:
targets[t] |= p
else:
_logger.info("Skip forward porting %s (of %s): no next target", p.display_name, self)
# all the PRs *with a next target* should have the same, we can have PRs
# stopping forward port earlier but skipping... probably not
if len(targets) != 1:
for t, prs in targets.items():
linked, other = next((
(linked, other)
for other, linkeds in targets.items()
if other != t
for linked in linkeds
))
for pr in prs:
self.env.ref('runbot_merge.forwardport.failure.discrepancy')._send(
repository=pr.repository,
pull_request=pr.number,
token_field='fp_github_token',
format_args={'pr': pr, 'linked': linked, 'next': t.name, 'other': other.name},
)
_logger.warning(
"Cancelling forward-port of %s (%s): found different next branches (%s)",
self,
', '.join(self.prs.mapped('display_name')),
', '.join(t.name for t in targets),
)
return
target, prs = next(iter(targets.items()))
# this is run by the cron, no need to check if otherwise scheduled:
# either the scheduled job is this one, or it's another scheduling
# which will run after this one and will see the port already exists
if self.search_count([('parent_id', '=', self.id), ('target', '=', target.id)]):
_logger.warning(
"Will not forward-port %s (%s): already ported",
self,
', '.join(prs.mapped('display_name'))
)
return
# the base PR is the PR with the "oldest" target
base = max(all_sources, key=lambda p: (p.target.sequence, p.target.name))
# take only the branch bit
new_branch = '%s-%s-%s-fw' % (
target.name,
base.refname,
# avoid collisions between fp branches (labels can be reused
# or conflict especially as we're chopping off the owner)
base64.urlsafe_b64encode(os.urandom(3)).decode()
)
conflicts = {}
for pr in prs:
repo = git.get_local(pr.repository)
conflicts[pr], head = pr._create_fp_branch(repo, target)
repo.push(git.fw_url(pr.repository), f"{head}:refs/heads/{new_branch}")
gh = requests.Session()
gh.headers['Authorization'] = 'token %s' % proj.fp_github_token
has_conflicts = any(conflicts.values())
# could create a batch here but then we'd have to update `_from_gh` to
# take a batch and then `create` to not automatically resolve batches,
# easier to not do that.
new_batch = PRs.browse(())
self.env.cr.execute('LOCK runbot_merge_pull_requests IN SHARE MODE')
for pr in prs:
owner, _ = pr.repository.fp_remote_target.split('/', 1)
source = pr.source_id or pr
root = pr.root_id
message = source.message + '\n\n' + '\n'.join(
"Forward-Port-Of: %s" % p.display_name
for p in root | source
)
title, body = re.match(r'(?P<title>[^\n]+)\n*(?P<body>.*)', message, flags=re.DOTALL).groups()
r = gh.post(f'https://api.github.com/repos/{pr.repository.name}/pulls', json={
'base': target.name,
'head': f'{owner}:{new_branch}',
'title': '[FW]' + (' ' if title[0] != '[' else '') + title,
'body': body
})
if not r.ok:
_logger.warning("Failed to create forward-port PR for %s, deleting branches", pr.display_name)
# delete all the branches, this should automatically close the
# PRs if we've created any. Using the API here is probably
# simpler than going through the working copies
for repo in prs.mapped('repository'):
d = gh.delete(f'https://api.github.com/repos/{repo.fp_remote_target}/git/refs/heads/{new_branch}')
if d.ok:
_logger.info("Deleting %s:%s=success", repo.fp_remote_target, new_branch)
else:
_logger.warning("Deleting %s:%s=%s", repo.fp_remote_target, new_branch, d.text)
raise RuntimeError(f"Forwardport failure: {pr.display_name} ({r.text})")
new_pr = PRs._from_gh(r.json())
_logger.info("Created forward-port PR %s", new_pr)
new_batch |= new_pr
# allows PR author to close or skipci
new_pr.write({
'merge_method': pr.merge_method,
'source_id': source.id,
# only link to previous PR of sequence if cherrypick passed
'parent_id': pr.id if not has_conflicts else False,
'detach_reason': "conflicts:\n{}".format('\n\n'.join(
f"{out}\n{err}".strip()
for _, out, err, _ in filter(None, conflicts.values())
)) if has_conflicts else None,
})
if has_conflicts and pr.parent_id and pr.state not in ('merged', 'closed'):
self.env.ref('runbot_merge.forwardport.failure.conflict')._send(
repository=pr.repository,
pull_request=pr.number,
token_field='fp_github_token',
format_args={'source': source, 'pr': pr, 'new': new_pr, 'footer': FOOTER},
)
for pr, new_pr in zip(prs, new_batch):
new_pr._fp_conflict_feedback(pr, conflicts)
labels = ['forwardport']
if has_conflicts:
labels.append('conflict')
self.env['runbot_merge.pull_requests.tagging'].create({
'repository': new_pr.repository.id,
'pull_request': new_pr.number,
'tags_add': labels,
})
new_batch = new_batch.batch_id
new_batch.parent_id = self
# try to schedule followup
new_batch._schedule_fp_followup()
return new_batch
def _schedule_fp_followup(self, *, force_fw=False):
_logger = logging.getLogger(__name__).getChild('forwardport.next')
# if the PR has a parent and is CI-validated, enqueue the next PR
scheduled = self.browse(())
for batch in self:
prs = ', '.join(batch.prs.mapped('display_name'))
_logger.info('Checking if forward-port %s (%s)', batch, prs)
# in case of conflict or update, individual PRs will "lose" their
# parent, which should prevent forward porting
#
# even if we force_fw, a *followup* should still only be created for
# forward ports, so check that the batch has a parent: that should be
# roughly equivalent to all the PRs having a source, but cheaper (it's
# not exactly equivalent since a user could technically have added a
# PR to the forward-ported batch)
if not (batch.parent_id and force_fw or all(p.parent_id for p in batch.prs)):
_logger.info('-> no parent %s (%s)', batch, prs)
continue
if not force_fw and batch.source.fw_policy != 'skipci' \
and (invalid := batch.prs.filtered(lambda p: p.state not in ['validated', 'ready'])):
_logger.info(
'-> wrong state %s (%s)',
batch,
', '.join(f"{p.display_name}: {p.state}" for p in invalid),
)
continue
# check if we've already forward-ported this branch
next_target = batch._find_next_targets()
if not next_target:
_logger.info("-> forward port done (no next target)")
continue
if len(next_target) > 1:
_logger.error(
"-> cancelling forward-port of %s (%s): inconsistent next target branch (%s)",
batch,
prs,
', '.join(next_target.mapped('name')),
)
if n := self.search([
('target', '=', next_target.id),
('parent_id', '=', batch.id),
], limit=1):
_logger.info('-> already forward-ported (%s)', n)
continue
_logger.info("check pending port for %s (%s)", batch, prs)
if self.env['forwardport.batches'].search_count([('batch_id', '=', batch.id)]):
_logger.warning('-> already recorded')
continue
_logger.info('-> ok')
self.env['forwardport.batches'].create({
'batch_id': batch.id,
'source': 'fp',
})
scheduled |= batch
return scheduled
def _find_next_target(self):
"""Retrieves the next target from every PR, and returns it if it's the
same for all the PRs which have one (PRs without a next target are
ignored, this is considered acceptable).
If the next targets are inconsistent, returns no next target.
"""
next_target = self._find_next_targets()
if len(next_target) == 1:
return next_target
else:
return self.env['runbot_merge.branch'].browse(())
def _find_next_targets(self):
return self.prs.mapped(lambda p: p._find_next_target() or self.env['runbot_merge.branch'])
def write(self, vals):
if vals.get('merge_date'):
# TODO: remove condition when everything is merged
remover = self.env.get('forwardport.branch_remover')
if remover is not None:
remover.create([
{'pr_id': p.id}
for b in self
if not b.merge_date
for p in b.prs
])
if vals.get('fw_policy') == 'skipci':
nonskip = self.filtered(lambda b: b.fw_policy != 'skipci')
else:
nonskip = self.browse(())
super().write(vals)
# if we change the policy to skip CI, schedule followups on merged
# batches which were not previously marked as skipping CI
if nonskip:
toggled = nonskip.filtered(lambda b: b.merge_date)
tips = toggled.mapped(lambda b: b.genealogy_ids[-1:])
for tip in tips:
tip._schedule_fp_followup()
return True
@api.ondelete(at_uninstall=True)
def _on_delete_clear_stagings(self):
self.batch_staging_ids.unlink()
def unlink(self):
"""
batches can be unlinked if they:
- have run out of PRs
- and don't have a parent batch (which is not being deleted)
- and don't have a child batch (which is not being deleted)
this is to keep track of forward port histories at the batch level
"""
unlinkable = self.filtered(
lambda b: not (b.prs or (b.parent_id - self) or (self.search([('parent_id', '=', b.id)]) - self))
)
return super(Batch, unlinkable).unlink()

View File

@ -0,0 +1,386 @@
import enum
from collections.abc import Iterator
from dataclasses import dataclass, field
from functools import partial
from operator import contains
from typing import Callable, List, Optional, Union, Tuple
def tokenize(line: str) -> Iterator[str]:
cur = ''
for c in line:
if c == '-' and not cur:
yield '-'
elif c in ' \t+=,':
if cur:
yield cur
cur = ''
if not c.isspace():
yield c
else:
cur += c
if cur:
yield cur
def normalize(it: Iterator[str]) -> Iterator[str]:
"""Converts shorthand tokens to expanded version
"""
for t in it:
match t:
case 'r':
yield 'review'
case 'r-':
yield 'review'
yield '-'
case _:
yield t
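# a minimal sketch of the tokenize/normalize pipeline (hypothetical input):
# list(normalize(tokenize("r+ delegate=a,b")))
# -> ['review', '+', 'delegate', '=', 'a', ',', 'b']
# note that "r-" stays a single token through tokenize and is only
# expanded to 'review', '-' by normalize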
@dataclass
class Peekable(Iterator[str]):
it: Iterator[str]
memo: Optional[str] = None
def __iter__(self) -> Iterator[str]:
return self
def __next__(self) -> str:
if self.memo is not None:
v, self.memo = self.memo, None
return v
return next(self.it)
def peek(self) -> Optional[str]:
if self.memo is None:
self.memo = next(self.it, None)
return self.memo
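# Peekable lets the parser look at the next token without consuming it,
# e.g. (hypothetical): p = Peekable(iter(['+', '1'])); p.peek() -> '+';
# next(p) -> '+'; next(p) -> '1'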
class CommandError(Exception):
pass
class Approve:
def __init__(self, ids: Optional[List[int]] = None) -> None:
self.ids = ids
def __str__(self) -> str:
if self.ids is not None:
ids = ','.join(map(str, self.ids))
return f"r={ids}"
return 'review+'
@classmethod
def help(cls, _: bool) -> Iterator[Tuple[str, str]]:
yield "r(eview)+", "approves the PR, if it's a forwardport also approves all non-detached parents"
yield "r(eview)=<number>", "only approves the specified parents"
class Reject:
def __str__(self) -> str:
return 'review-'
@classmethod
def help(cls, _: bool) -> Iterator[Tuple[str, str]]:
yield "r(eview)-", "removes approval of a previously approved PR, if the PR is staged the staging will be cancelled"
class MergeMethod(enum.Enum):
SQUASH = 'squash'
REBASE_FF = 'rebase-ff'
REBASE_MERGE = 'rebase-merge'
MERGE = 'merge'
def __str__(self) -> str:
return self.value
@classmethod
def help(cls, _: bool) -> Iterator[Tuple[str, str]]:
yield str(cls.MERGE), "integrate the PR with a simple merge commit, using the PR description as message"
yield str(cls.REBASE_MERGE), "rebases the PR on top of the target branch then integrates it with a merge commit, using the PR description as message"
yield str(cls.REBASE_FF), "rebases the PR on top of the target branch, then fast-forwards"
yield str(cls.SQUASH), "squashes the PR as a single commit on the target branch, using the PR description as message"
class Retry:
def __str__(self) -> str:
return 'retry'
@classmethod
def help(cls, _: bool) -> Iterator[Tuple[str, str]]:
yield "retry", 're-tries staging a PR in the "error" state'
class Check:
def __str__(self) -> str:
return 'check'
@classmethod
def help(cls, _: bool) -> Iterator[Tuple[str, str]]:
yield "check", "fetches or refreshes PR metadata, resets mergebot state"
@dataclass
class Override:
statuses: List[str] = field(default_factory=list)
def __str__(self) -> str:
return f"override={','.join(self.statuses)}"
@classmethod
def help(cls, _: bool) -> Iterator[Tuple[str, str]]:
yield "override=<...>", "marks overridable statuses as successful"
@dataclass
class Delegate:
users: List[str] = field(default_factory=list)
def __str__(self) -> str:
if not self.users:
return 'delegate+'
return f"delegate={','.join(self.users)}"
@classmethod
def help(cls, _: bool) -> Iterator[Tuple[str, str]]:
yield "delegate+", "grants approval rights to the PR author"
yield "delegate=<...>", "grants approval rights on this PR to the specified github users"
class Priority(enum.Enum):
DEFAULT = enum.auto()
PRIORITY = enum.auto()
ALONE = enum.auto()
def __str__(self) -> str:
return self.name.lower()
@classmethod
def help(cls, _: bool) -> Iterator[Tuple[str, str]]:
yield str(cls.DEFAULT), "stages the PR normally"
yield str(cls.PRIORITY), "tries to stage this PR first, then adds `default` PRs if the staging has room"
yield str(cls.ALONE), "stages this PR only with other PRs of the same priority"
class CancelStaging:
def __str__(self) -> str:
return "cancel=staging"
@classmethod
def help(cls, _: bool) -> Iterator[Tuple[str, str]]:
yield "cancel=staging", "automatically cancels the current staging when this PR becomes ready"
class SkipChecks:
def __str__(self) -> str:
return 'skipchecks'
@classmethod
def help(cls, _: bool) -> Iterator[Tuple[str, str]]:
yield "skipchecks", "bypasses both statuses and review"
class FW(enum.Enum):
DEFAULT = enum.auto()
NO = enum.auto()
SKIPCI = enum.auto()
SKIPMERGE = enum.auto()
def __str__(self) -> str:
return f'fw={self.name.lower()}'
@classmethod
def help(cls, is_reviewer: bool) -> Iterator[Tuple[str, str]]:
yield str(cls.NO), "does not forward-port this PR"
if is_reviewer:
yield str(cls.DEFAULT), "forward-ports this PR normally"
yield str(cls.SKIPCI), "does not wait for a forward-port's statuses to succeed before creating the next one"
@dataclass
class Limit:
branch: Optional[str]
def __str__(self) -> str:
if self.branch is None:
return 'ignore'
return f'up to {self.branch}'
@classmethod
def help(cls, _: bool) -> Iterator[Tuple[str, str]]:
yield "up to <branch>", "only ports this PR forward to the specified branch (included)"
class Close:
def __str__(self) -> str:
return 'close'
@classmethod
def help(cls, _: bool) -> Iterator[Tuple[str, str]]:
yield str(cls()), "closes this forward-port"
class Help:
def __str__(self) -> str:
return 'help'
@classmethod
def help(cls, _: bool) -> Iterator[Tuple[str, str]]:
yield str(cls()), "displays this help"
Command = Union[
Approve,
CancelStaging,
Close,
Check,
Delegate,
FW,
Help,
Limit,
MergeMethod,
Override,
Priority,
Reject,
Retry,
SkipChecks,
]
class Parser:
def __init__(self, line: str) -> None:
self.it = Peekable(normalize(tokenize(line)))
def __iter__(self) -> Iterator[Command]:
for token in self.it:
if token.startswith("NOW"):
# any number of ! is allowed
if token.startswith("NOW!"):
yield Priority.ALONE
elif token == "NOW":
yield Priority.PRIORITY
else:
raise CommandError(f"unknown command {token!r}")
yield SkipChecks()
yield CancelStaging()
continue
handler = getattr(type(self), f'parse_{token.replace("-", "_")}', None)
if handler:
yield handler(self)
elif '!' in token:
raise CommandError("no need to scream")
else:
raise CommandError(f"unknown command {token!r}")
def assert_next(self, val: str) -> None:
if (actual := next(self.it, None)) != val:
raise CommandError(f"expected {val!r}, got {actual!r}")
def check_next(self, val: str) -> bool:
if self.it.peek() == val:
self.it.memo = None # consume peeked value
return True
return False
def parse_review(self) -> Union[Approve, Reject]:
t = next(self.it, None)
if t == '+':
return Approve()
if t == '-':
return Reject()
if t == '=':
t = next(self.it, None)
if not (t and t.isdecimal()):
raise CommandError(f"expected PR ID to approve, found {t!r}")
ids = [int(t)]
while self.check_next(','):
id = next(self.it, None)
if id and id.isdecimal():
ids.append(int(id))
else:
raise CommandError(f"expected PR ID to approve, found {id!r}")
return Approve(ids)
raise CommandError(f"unknown review {t!r}")
def parse_squash(self) -> MergeMethod:
return MergeMethod.SQUASH
def parse_rebase_ff(self) -> MergeMethod:
return MergeMethod.REBASE_FF
def parse_rebase_merge(self) -> MergeMethod:
return MergeMethod.REBASE_MERGE
def parse_merge(self) -> MergeMethod:
return MergeMethod.MERGE
def parse_retry(self) -> Retry:
return Retry()
def parse_check(self) -> Check:
return Check()
def parse_override(self) -> Override:
self.assert_next('=')
ci = [next(self.it)]
while self.check_next(','):
ci.append(next(self.it))
return Override(ci)
def parse_delegate(self) -> Delegate:
match next(self.it, None):
case '+':
return Delegate()
case '=':
delegates = [next(self.it).lstrip('#@')]
while self.check_next(','):
delegates.append(next(self.it).lstrip('#@'))
return Delegate(delegates)
case d:
raise CommandError(f"unknown delegation {d!r}")
def parse_default(self) -> Priority:
return Priority.DEFAULT
def parse_priority(self) -> Priority:
return Priority.PRIORITY
def parse_alone(self) -> Priority:
return Priority.ALONE
def parse_cancel(self) -> CancelStaging:
self.assert_next('=')
self.assert_next('staging')
return CancelStaging()
def parse_skipchecks(self) -> SkipChecks:
return SkipChecks()
def parse_fw(self) -> FW:
self.assert_next('=')
f = next(self.it, "")
try:
if f in ('disable', 'disabled'):
return FW.NO
return FW[f.upper()]
except KeyError:
raise CommandError(f"unknown fw configuration {f or None!r}") from None
def parse_ignore(self) -> Limit:
return Limit(None)
def parse_up(self) -> Limit:
self.assert_next('to')
if limit := next(self.it, None):
return Limit(limit)
else:
raise CommandError("please provide a branch to forward-port to")
def parse_close(self) -> Close:
return Close()
def parse_help(self) -> Help:
return Help()
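# usage sketch (hypothetical command line): iterating a Parser yields
# one Command per token group, e.g.
# [str(c) for c in Parser("r+ fw=no up to saas-17.2")]
# -> ['review+', 'fw=no', 'up to saas-17.2']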

View File

@ -0,0 +1,2 @@
from . import git_maintenance
from . import cleanup_scratch_branches

View File

@ -0,0 +1,33 @@
import logging
from odoo import models
_logger = logging.getLogger(__name__)
class BranchCleanup(models.TransientModel):
_name = 'runbot_merge.branch_cleanup'
_description = "cleans up scratch refs for deactivated branches"
def _run(self):
domain = [('active', '=', False)]
if lastcall := self.env.context['lastcall']:
domain.append(('write_date', '>=', lastcall))
deactivated = self.env['runbot_merge.branch'].search(domain)
_logger.info(
"deleting scratch (tmp and staging) refs for branches %s",
', '.join(b.name for b in deactivated)
)
# loop around the repos first, so we can reuse the gh instance
for r in deactivated.mapped('project_id.repo_ids'):
gh = r.github()
for b in deactivated:
if b.project_id != r.project_id:
continue
res = gh('delete', f'git/refs/heads/tmp.{b.name}', check=False)
if res.status_code != 204:
_logger.info("no tmp branch found for %s:%s", r.name, b.name)
res = gh('delete', f'git/refs/heads/staging.{b.name}', check=False)
if res.status_code != 204:
_logger.info("no staging branch found for %s:%s", r.name, b.name)

View File

@ -0,0 +1,23 @@
<odoo>
<record id="access_branch_cleanup" model="ir.model.access">
<field name="name">Access to branch cleanup is useless</field>
<field name="model_id" ref="model_runbot_merge_branch_cleanup"/>
<field name="perm_read">0</field>
<field name="perm_create">0</field>
<field name="perm_write">0</field>
<field name="perm_unlink">0</field>
</record>
<record model="ir.cron" id="branch_cleanup">
<field name="name">Removal of scratch refs for deactivated branch</field>
<field name="model_id" ref="model_runbot_merge_branch_cleanup"/>
<field name="state">code</field>
<field name="code">model._run()</field>
<!--
nota: even though this is only triggered, numbercall has to be
non-zero because the counter is taken into account by cron triggers
-->
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
</record>
</odoo>

View File

@ -0,0 +1,44 @@
import logging
import subprocess
from odoo import models
from ...git import get_local
_gc = logging.getLogger(__name__)
class GC(models.TransientModel):
_name = 'runbot_merge.maintenance'
_description = "Weekly maintenance of... cache repos?"
def _run(self):
# lock out crons which use the local repo cache to avoid concurrency
# issues while we're GC-ing it
Stagings = self.env['runbot_merge.stagings']
crons = self.env.ref('runbot_merge.staging_cron', Stagings) | self.env.ref('forwardport.port_forward', Stagings)
if crons:
self.env.cr.execute("""
SELECT 1 FROM ir_cron
WHERE id = any(%s)
FOR UPDATE
""", [crons.ids])
# run on all repos with a forwardport target (~ forwardport enabled)
for repo in self.env['runbot_merge.repository'].search([]):
repo_git = get_local(repo, clone=False)
if not repo_git:
continue
_gc.info('Running maintenance on %s', repo.name)
r = repo_git\
.stdout(True)\
.with_config(stderr=subprocess.STDOUT, text=True, check=False)\
.remote('prune', 'origin')
if r.returncode:
_gc.warning("Prune failure (status=%d):\n%s", r.returncode, r.stdout)
r = repo_git\
.stdout(True)\
.with_config(stderr=subprocess.STDOUT, text=True, check=False)\
.gc('--prune=now', aggressive=True)
if r.returncode:
_gc.warning("GC failure (status=%d):\n%s", r.returncode, r.stdout)

View File

@ -0,0 +1,26 @@
<odoo>
<record id="access_forwardport_maintenance" model="ir.model.access">
<field name="name">Access to maintenance is useless</field>
<field name="model_id" ref="model_runbot_merge_maintenance"/>
<field name="perm_read">0</field>
<field name="perm_create">0</field>
<field name="perm_write">0</field>
<field name="perm_unlink">0</field>
</record>
<record model="ir.cron" id="maintenance">
<field name="name">Maintenance of repo cache</field>
<field name="model_id" ref="model_runbot_merge_maintenance"/>
<field name="state">code</field>
<field name="code">model._run()</field>
<!--
run Sunday morning as it can take a while, it's unlikely someone will need
to stage or forward-port stuff at that point
-->
<field name="nextcall" eval="datetime.utcnow() + relativedelta(weekday=6, hour=2, minute=0, second=0, microsecond=0)"/>
<field name="interval_number">1</field>
<field name="interval_type">weeks</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
</record>
</odoo>

View File

@ -0,0 +1,12 @@
from odoo import models, fields
class EventsSources(models.Model):
_name = 'runbot_merge.events_sources'
_description = 'Valid Webhook Event Sources'
_order = "repository"
_rec_name = "repository"
# FIXME: unique repo? Or allow multiple secrets per repo?
repository = fields.Char(index=True, required=True)
secret = fields.Char()

View File

@ -0,0 +1,33 @@
from collections import ChainMap
from odoo import models
from odoo.tools import ConstantMapping
class MailThread(models.AbstractModel):
_inherit = 'mail.thread'
def _message_compute_author(self, author_id=None, email_from=None, raise_on_email=True):
if author_id is None and self:
mta = self.env.cr.precommit.data.get(f'mail.tracking.author.{self._name}', {})
authors = self.env['res.partner'].union(*(p for r in self if (p := mta.get(r.id))))
if len(authors) == 1:
author_id = authors.id
v = super()._message_compute_author(author_id, email_from, raise_on_email)
return v
def _track_set_author(self, author, *, fallback=False):
""" Set the author of the tracking message. """
if not self._track_get_fields():
return
authors = self.env.cr.precommit.data.setdefault(f'mail.tracking.author.{self._name}', {})
if fallback:
details = authors
if isinstance(authors, ChainMap):
details = authors.maps[0]
self.env.cr.precommit.data[f'mail.tracking.author.{self._name}'] = ChainMap(
details,
ConstantMapping(author),
)
else:
return super()._track_set_author(author)
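# sketch of the fallback behaviour (assuming odoo.tools.ConstantMapping
# maps every key to the same value): with fallback=True the ChainMap
# first consults the explicitly-set per-record authors, then falls back
# to `author` for any other record id, so records created later in the
# same transaction still get a tracking author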

View File

@ -1,7 +1,14 @@
import logging
import re
from typing import List
from odoo import models, fields
import requests
import sentry_sdk
from odoo import models, fields, api
from odoo.exceptions import UserError
from odoo.osv import expression
from odoo.tools import reverse_order
_logger = logging.getLogger(__name__)
class Project(models.Model):
@ -19,6 +26,14 @@ class Project(models.Model):
help="Branches of all project's repos which are managed by the merge bot. Also "\
"target branches of PR this project handles."
)
staging_enabled = fields.Boolean(default=True)
staging_priority = fields.Selection([
('default', "Splits over ready PRs"),
('largest', "Largest of split and ready PRs"),
('ready', "Ready PRs over split"),
], default="default", required=True)
staging_statuses = fields.Boolean(default=True)
staging_rpc = fields.Boolean(default=False)
ci_timeout = fields.Integer(
default=60, required=True, group_operator=None,
@ -26,30 +41,92 @@ class Project(models.Model):
)
github_token = fields.Char("Github Token", required=True)
github_name = fields.Char(store=True, compute="_compute_identity")
github_email = fields.Char(store=True, compute="_compute_identity")
github_prefix = fields.Char(
required=True,
default="hanson", # mergebot du bot du bot du~
help="Prefix (~bot name) used when sending commands from PR "
"comments e.g. [hanson retry] or [hanson r+ p=1]"
"comments e.g. [hanson retry] or [hanson r+ priority]",
)
fp_github_token = fields.Char()
fp_github_name = fields.Char(store=True, compute="_compute_git_identity")
batch_limit = fields.Integer(
default=8, group_operator=None, help="Maximum number of PRs staged together")
secret = fields.Char(
help="Webhook secret. If set, will be checked against the signature "
"of (valid) incoming webhook signatures, failing signatures "
"will lead to webhook rejection. Should only use ASCII."
)
freeze_id = fields.Many2one('runbot_merge.project.freeze', compute='_compute_freeze')
freeze_reminder = fields.Text()
def _check_stagings(self, commit=False):
for branch in self.search([]).mapped('branch_ids').filtered('active'):
staging = branch.active_staging_id
if not staging:
uniquifier = fields.Boolean(
default=True,
help="Whether to add a uniquifier commit on repositories without PRs"
" during staging. The lack of uniquifier can lead to CI conflicts"
" as github works off of commits, so it's possible for an"
" unrelated build to trigger a failure if somebody is a dummy and"
" includes repos they have no commit for."
)
@api.depends('github_token')
def _compute_identity(self):
s = requests.Session()
for project in self:
if not project.github_token or (project.github_name and project.github_email):
continue
r0 = s.get('https://api.github.com/user', headers={
'Authorization': 'token %s' % project.github_token
})
if not r0.ok:
_logger.error("Failed to fetch merge bot information for project %s: %s", project.name, r0.text or r0.content)
continue
r = r0.json()
project.github_name = r['name'] or r['login']
if email := r['email']:
project.github_email = email
continue
if 'user:email' not in set(re.split(r',\s*', r0.headers['x-oauth-scopes'])):
raise UserError("The merge bot github token needs the user:email scope to fetch the bot's identity.")
r1 = s.get('https://api.github.com/user/emails', headers={
'Authorization': 'token %s' % project.github_token
})
if not r1.ok:
_logger.error("Failed to fetch merge bot emails for project %s: %s", project.name, r1.text or r1.content)
continue
project.github_email = next((
entry['email']
for entry in r1.json()
if entry['primary']
), None)
if not project.github_email:
raise UserError("The merge bot needs a public or accessible primary email set up.")
# technically the email could change at any moment...
@api.depends('fp_github_token')
def _compute_git_identity(self):
s = requests.Session()
for project in self:
if project.fp_github_name or not project.fp_github_token:
continue
r0 = s.get('https://api.github.com/user', headers={
'Authorization': 'token %s' % project.fp_github_token
})
if not r0.ok:
_logger.error("Failed to fetch forward bot information for project %s: %s", project.name, r0.text or r0.content)
continue
user = r0.json()
project.fp_github_name = user['name'] or user['login']
def _check_stagings(self, commit=False):
# check branches with an active staging
for branch in self.env['runbot_merge.branch']\
.with_context(active_test=False)\
.search([('active_staging_id', '!=', False)]):
staging = branch.active_staging_id
try:
with self.env.cr.savepoint():
staging.check_status()
@ -61,23 +138,41 @@ class Project(models.Model):
self.env.cr.commit()
def _create_stagings(self, commit=False):
for branch in self.search([]).mapped('branch_ids').filtered('active'):
if not branch.active_staging_id:
try:
with self.env.cr.savepoint():
branch.try_staging()
except Exception:
_logger.exception("Failed to create staging for branch %r", branch.name)
else:
if commit:
self.env.cr.commit()
from .stagings_create import try_staging
def _find_commands(self, comment):
# look up branches which can be staged on and have no active staging
for branch in self.env['runbot_merge.branch'].search([
('active_staging_id', '=', False),
('active', '=', True),
('staging_enabled', '=', True),
('project_id.staging_enabled', '=', True),
]):
try:
with self.env.cr.savepoint(), \
sentry_sdk.start_span(description=f'create staging {branch.name}') as span:
span.set_tag('branch', branch.name)
try_staging(branch)
except Exception:
_logger.exception("Failed to create staging for branch %r", branch.name)
else:
if commit:
self.env.cr.commit()
def _find_commands(self, comment: str) -> List[str]:
"""Tries to find all the lines starting (ignoring leading whitespace)
with either the merge or the forward port bot identifiers.
For convenience, the identifier *can* be prefixed with an ``@`` or
``#``, and suffixed with a ``:``.
"""
# horizontal whitespace (\s - {\n, \r}), but Python doesn't have \h or \p{Blank}
h = r'[^\S\r\n]'
return re.findall(
r'^\s*[@|#]?{}:? (.*)$'.format(self.github_prefix),
fr'^{h}*[@|#]?{self.github_prefix}(?:{h}+|:{h}*)(.*)$',
comment, re.MULTILINE | re.IGNORECASE)
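# hedged examples (hypothetical prefix "hanson"): lines matching include
# "@hanson retry" -> "retry", "#hanson: r+" -> "r+", and
# "hanson r+ fw=no" -> "r+ fw=no"; the bot name must start the line
# (modulo horizontal whitespace) for the command to be picked up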
def _has_branch(self, name):
self.env['runbot_merge.branch'].flush_model(['project_id', 'name'])
self.env.cr.execute("""
SELECT 1 FROM runbot_merge_branch
WHERE project_id = %s AND name = %s
@ -121,3 +216,10 @@ class Project(models.Model):
]
})
return w.action_open()
def _forward_port_ordered(self, domain=()):
Branches = self.env['runbot_merge.branch']
return Branches.search(expression.AND([
[('project_id', '=', self.id)],
domain or [],
]), order=reverse_order(Branches._order))

View File

@ -1,18 +1,19 @@
import contextlib
import enum
import itertools
import json
import logging
import time
from collections import Counter
from typing import Dict
from markupsafe import Markup
from odoo import models, fields, api, Command
from odoo.addons.runbot_merge.exceptions import FastForwardError
from odoo.exceptions import UserError
from odoo.tools import drop_view_if_exists
from ... import git
from ..pull_requests import Repository
_logger = logging.getLogger(__name__)
class FreezeWizard(models.Model):
_name = 'runbot_merge.project.freeze'
@ -177,11 +178,13 @@ class FreezeWizard(models.Model):
if self.errors:
return self.action_open()
conflict_crons = self.env.ref('runbot_merge.merge_cron') | self.env.ref('runbot_merge.staging_cron')
conflict_crons = self.env.ref('runbot_merge.merge_cron')\
| self.env.ref('runbot_merge.staging_cron')\
| self.env.ref('runbot_merge.process_updated_commits')
# we don't want to run concurrently to the crons above, though we
# don't need to prevent read access to them
self.env.cr.execute(
'SELECT * FROM ir_cron WHERE id =ANY(%s) FOR SHARE NOWAIT',
'SELECT FROM ir_cron WHERE id =ANY(%s) FOR SHARE NOWAIT',
[conflict_crons.ids]
)
@ -190,6 +193,12 @@ class FreezeWizard(models.Model):
# everything so the new branch is the second one, just after the branch
# it "forks"
master, rest = project_id.branch_ids[0], project_id.branch_ids[1:]
if self.bump_pr_ids and master.active_staging_id:
self.env.cr.execute(
'SELECT FROM runbot_merge_stagings WHERE id = %s FOR UPDATE NOWAIT',
[master.active_staging_id.id]
)
seq = itertools.count(start=1) # start reseq at 1
commands = [
(1, master.id, {'sequence': next(seq)}),
@ -203,50 +212,65 @@ class FreezeWizard(models.Model):
master_name = master.name
gh_sessions = {r: r.github() for r in self.project_id.repo_ids}
repos: Dict[Repository, git.Repo] = {
r: git.get_local(r).check(False)
for r in self.project_id.repo_ids
}
for repo, copy in repos.items():
copy.fetch(git.source_url(repo), '+refs/heads/*:refs/heads/*')
all_prs = self.release_pr_ids.pr_id | self.bump_pr_ids.pr_id
for pr in all_prs:
repos[pr.repository].fetch(
git.source_url(pr.repository),
pr.head,
)
# prep new branch (via tmp refs) on every repo
rel_heads = {}
rel_heads: Dict[Repository, str] = {}
# store for master heads as odds are high the bump pr(s) will be on the
# same repo as one of the release PRs
prevs = {}
prevs: Dict[Repository, str] = {}
for rel in self.release_pr_ids:
repo_id = rel.repository_id
gh = gh_sessions[repo_id]
try:
prev = prevs[repo_id] = gh.head(master_name)
except Exception:
raise UserError(f"Unable to resolve branch {master_name} of repository {repo_id.name} to a commit.")
except Exception as e:
raise UserError(f"Unable to resolve branch {master_name} of repository {repo_id.name} to a commit.") from e
# create the tmp branch to merge the PR into
tmp_branch = f'tmp.{self.branch_name}'
try:
gh.set_ref(tmp_branch, prev)
except Exception as err:
raise UserError(f"Unable to create branch {self.branch_name} of repository {repo_id.name}: {err}.")
commits = gh.commits(rel.pr_id.number)
except Exception as e:
raise UserError(f"Unable to fetch commits of release PR {rel.pr_id.display_name}.") from e
rel_heads[repo_id], _ = gh.rebase(rel.pr_id.number, tmp_branch)
time.sleep(1)
_logger.debug("rebasing %s on %s (commits=%s)",
rel.pr_id.display_name, prev, len(commits))
rel_heads[repo_id] = repos[repo_id].rebase(prev, commits)[0]
# prep bump
bump_heads = {}
bump_heads: Dict[Repository, str] = {}
for bump in self.bump_pr_ids:
repo_id = bump.repository_id
gh = gh_sessions[repo_id]
try:
prev = prevs[repo_id] = prevs.get(repo_id) or gh.head(master_name)
except Exception:
raise UserError(f"Unable to resolve branch {master_name} of repository {repo_id.name} to a commit.")
except Exception as e:
raise UserError(f"Unable to resolve branch {master_name} of repository {repo_id.name} to a commit.") from e
# create the tmp branch to merge the PR into
tmp_branch = f'tmp.{master_name}'
try:
gh.set_ref(tmp_branch, prev)
except Exception as err:
raise UserError(f"Unable to create branch {master_name} of repository {repo_id.name}: {err}.")
commits = gh.commits(bump.pr_id.number)
except Exception as e:
raise UserError(f"Unable to fetch commits of bump PR {bump.pr_id.display_name}.") from e
bump_heads[repo_id], _ = gh.rebase(bump.pr_id.number, tmp_branch)
time.sleep(1)
_logger.debug("rebasing %s on %s (commits=%s)",
bump.pr_id.display_name, prev, len(commits))
bump_heads[repo_id] = repos[repo_id].rebase(prev, commits)[0]
# prevent concurrent updates to the commits table so we control the
# creation of commit objects from rebasing the release & bump PRs, do it
# only just before *pushing*
self.env.cr.execute("LOCK runbot_merge_commit IN ACCESS EXCLUSIVE MODE NOWAIT")
deployed = {}
# at this point we've got a bunch of tmp branches with merged release
@ -256,38 +280,39 @@ class FreezeWizard(models.Model):
failure = None
for rel in self.release_pr_ids:
repo_id = rel.repository_id
# helper API currently has no API to ensure we're just creating a
# new branch (as cheaply as possible) so do it by hand
status = None
with contextlib.suppress(Exception):
status = gh_sessions[repo_id].create_ref(self.branch_name, rel_heads[repo_id])
deployed[rel.pr_id.id] = rel_heads[repo_id]
to_delete.append(repo_id)
if status != 201:
if repos[repo_id].push(
git.source_url(repo_id),
f'{rel_heads[repo_id]}:refs/heads/{self.branch_name}',
).returncode:
failure = ('create', repo_id.name, self.branch_name)
break
deployed[rel.pr_id.id] = rel_heads[repo_id]
to_delete.append(repo_id)
else: # all release deployments succeeded
for bump in self.bump_pr_ids:
repo_id = bump.repository_id
try:
gh_sessions[repo_id].fast_forward(master_name, bump_heads[repo_id])
deployed[bump.pr_id.id] = bump_heads[repo_id]
to_revert.append(repo_id)
except FastForwardError:
if repos[repo_id].push(
git.source_url(repo_id),
f'{bump_heads[repo_id]}:refs/heads/{master_name}'
).returncode:
failure = ('fast-forward', repo_id.name, master_name)
break
deployed[bump.pr_id.id] = bump_heads[repo_id]
to_revert.append(repo_id)
if failure:
addendums = []
# creating the branch failed, try to delete all previous branches
failures = []
for prev_id in to_revert:
revert = gh_sessions[prev_id]('PATCH', f'git/refs/heads/{master_name}', json={
'sha': prevs[prev_id],
'force': True
}, check=False)
if not revert.ok:
if repos[prev_id].push(
'-f',
git.source_url(prev_id),
f'{prevs[prev_id]}:refs/heads/{master_name}',
).returncode:
failures.append(prev_id.name)
if failures:
addendums.append(
@ -297,8 +322,10 @@ class FreezeWizard(models.Model):
failures.clear()
for prev_id in to_delete:
deletion = gh_sessions[prev_id]('DELETE', f'git/refs/heads/{self.branch_name}', check=False)
if not deletion.ok:
if repos[prev_id].push(
git.source_url(prev_id),
f':refs/heads/{self.branch_name}'
).returncode:
failures.append(prev_id.name)
if failures:
addendums.append(
@ -317,8 +344,83 @@ class FreezeWizard(models.Model):
f"Unable to {reason} branch {repo}:{branch}.{addendum}"
)
all_prs = self.release_pr_ids.pr_id | self.bump_pr_ids.pr_id
all_prs.state = 'merged'
b = self.env['runbot_merge.branch'].search([('name', '=', self.branch_name)])
# We specifically don't want to call modified() or anything.
self.env.cr.execute(
"UPDATE runbot_merge_batch SET target=%s WHERE id = %s;"
"UPDATE runbot_merge_pull_requests SET target=%s WHERE id = any(%s)",
[
b.id, self.release_pr_ids.pr_id.batch_id.id,
b.id, self.release_pr_ids.pr_id.ids,
]
)
all_prs.batch_id.merge_date = fields.Datetime.now()
all_prs.reviewed_by = self.env.user.partner_id.id
for p in all_prs:
p.commits_map = json.dumps({
'': deployed[p.id],
p.head: deployed[p.id]
})
# stagings have to be created conditionally as otherwise we might not
# have a `target` to set and it's mandatory
laster = self.env['runbot_merge.stagings'].search(
[('target', '=', master.id), ('state', '=', 'success')],
order='id desc',
limit=1,
).commits.mapped(lambda c: (c.repository_id, c.commit_id))
if self.release_pr_ids:
rel_items = [(0, 0, {
'repository_id': repo.id,
'commit_id': self.env['runbot_merge.commit'].create({
'sha': sha,
'to_check': False,
}).id,
} if (sha := rel_heads.get(repo)) else {
'repository_id': repo.id,
'commit_id': commit.id,
})
for repo, commit in laster
]
self.env['runbot_merge.stagings'].create([{
'state': 'success',
'reason': 'release freeze staging',
'active': False,
'target': b.id,
'staging_batch_ids': [
(0, 0, {'runbot_merge_batch_id': batch.id})
for batch in self.release_pr_ids.pr_id.batch_id
],
'heads': rel_items,
'commits': rel_items,
}])
if self.bump_pr_ids:
bump_items = [(0, 0, {
'repository_id': repo.id,
'commit_id': self.env['runbot_merge.commit'].create({
'sha': sha,
'to_check': False,
}).id,
} if (sha := bump_heads.get(repo)) else {
'repository_id': repo.id,
'commit_id': commit.id,
})
for repo, commit in laster
]
self.env['runbot_merge.stagings'].create([{
'state': 'success',
'reason': 'bump freeze staging',
'active': False,
'target': master.id,
'staging_batch_ids': [
(0, 0, {'runbot_merge_batch_id': batch.id})
for batch in self.bump_pr_ids.pr_id.batch_id
],
'heads': bump_items,
'commits': bump_items,
}])
self.env['runbot_merge.pull_requests.feedback'].create([{
'repository': pr.repository.id,
'pull_request': pr.number,
@ -460,7 +562,7 @@ class OpenPRLabels(models.Model):
def init(self):
super().init()
drop_view_if_exists(self.env.cr, "runbot_merge_freeze_labels");
drop_view_if_exists(self.env.cr, "runbot_merge_freeze_labels")
self.env.cr.execute("""
CREATE VIEW runbot_merge_freeze_labels AS (
SELECT DISTINCT ON (label)

View File

@ -6,7 +6,7 @@
<form js_class="freeze_wizard">
<sheet>
<div class="alert alert-warning" role="alert"
attrs="{'invisible': [('errors', '=', False)]}">
invisible="not errors">
<field name="errors" readonly="True"/>
</div>
<group>
@ -59,9 +59,9 @@
the style of the button if the form has "no errors"
-->
<button string="Freeze" type="object" name="action_freeze"
class="btn-success" attrs="{'invisible': [('errors', '!=', False)]}"/>
class="btn-success" invisible="errors"/>
<button string="Freeze" type="object" name="action_freeze"
class="btn-primary" attrs="{'invisible': [('errors', '=', False)]}"/>
class="btn-primary" invisible="not errors"/>
<button string="Save &amp; Close" special="save"/>
<button string="Cancel" type="object" name="action_cancel" class="btn-warning"/>
</footer>

File diff suppressed because it is too large

View File

@ -1,7 +1,10 @@
import random
from email.utils import parseaddr
from odoo import fields, models, tools, api
from markupsafe import Markup, escape
import odoo.tools
from odoo import fields, models, tools, api, Command
from .. import github
@ -11,7 +14,8 @@ class CIText(fields.Char):
column_cast_from = ('varchar', 'text')
class Partner(models.Model):
_inherit = 'res.partner'
_name = 'res.partner'
_inherit = ['res.partner', 'mail.thread']
email = fields.Char(index=True)
github_login = CIText()
@ -19,6 +23,7 @@ class Partner(models.Model):
formatted_email = fields.Char(string="commit email", compute='_rfc5322_formatted')
review_rights = fields.One2many('res.partner.review', 'partner_id')
override_rights = fields.Many2many('res.partner.override')
override_sensitive = fields.Boolean(compute="_compute_sensitive_overrides")
def _auto_init(self):
res = super(Partner, self)._auto_init()
@ -45,6 +50,71 @@ class Partner(models.Model):
p.email = gh.user(p.github_login)['email'] or False
return False
@api.depends("override_rights.context")
def _compute_sensitive_overrides(self):
for p in self:
p.override_sensitive = any(o.context == 'ci/security' for o in p.override_rights)
def write(self, vals):
created = []
updated = {}
deleted = set()
for cmd, id, values in vals.get('review_rights', []):
if cmd == Command.DELETE:
deleted.add(id)
elif cmd == Command.CREATE:
# 'repository_id': 3, 'review': True, 'self_review': False
created.append(values)
elif cmd == Command.UPDATE:
updated[id] = values
# could also be LINK for records which are not touched but we don't care
new_rights = None
if r := vals.get('override_rights'):
# only handle reset (for now?): even though technically e.g. 0 works,
# the web client doesn't seem to use it (?)
if r[0][0] == 6:
new_rights = self.env['res.partner.override'].browse(r[0][2])
Repo = self.env['runbot_merge.repository'].browse
for p in self:
msgs = []
if ds := p.review_rights.filtered(lambda r: r.id in deleted):
msgs.append("removed review rights on {}\n".format(
', '.join(ds.mapped('repository_id.name'))
))
if us := p.review_rights.filtered(lambda r: r.id in updated):
msgs.extend(
"updated review rights on {}: {}\n".format(
u.repository_id.name,
', '.join(
f'allowed {f}' if v else f'forbid {f}'
for f in ['review', 'self_review']
if (v := updated[u.id].get(f)) is not None
)
)
for u in us
)
msgs.extend(
'added review rights on {}: {}\n'.format(
Repo(c['repository_id']).name,
', '.join(filter(c.get, ['review', 'self_review'])),
)
for c in created
)
if new_rights is not None:
for r in p.override_rights - new_rights:
msgs.append(f"removed override rights for {r.context!r} on {r.repository_id.name}")
for r in new_rights - p.override_rights:
msgs.append(f"added override rights for {r.context!r} on {r.repository_id.name}")
if msgs:
p._message_log(body=Markup('<ul>{}</ul>').format(Markup().join(
map(Markup('<li>{}</li>').format, reversed(msgs))
)))
return super().write(vals)
class PartnerMerge(models.TransientModel):
_inherit = 'base.partner.merge.automatic.wizard'
@ -75,14 +145,13 @@ class ReviewRights(models.Model):
tools.create_unique_index(self._cr, 'runbot_merge_review_m2m', self._table, ['partner_id', 'repository_id'])
return res
def name_get(self):
return [
(r.id, '%s: %s' % (r.repository_id.name, ', '.join(filter(None, [
@api.depends('repository_id.name', 'review', 'self_review')
def _compute_display_name(self):
for r in self:
r.display_name = '%s: %s' % (r.repository_id.name, ', '.join(filter(None, [
r.review and "reviewer",
r.self_review and "self-reviewer"
]))))
for r in self
]
])))
@api.model
def name_search(self, name='', args=None, operator='ilike', limit=100):
@ -103,6 +172,42 @@ class OverrideRights(models.Model):
['context', 'coalesce(repository_id, 0)']
)
@api.model_create_multi
def create(self, vals_list):
for partner, contexts in odoo.tools.groupby((
(partner_id, vals['context'], vals['repository_id'])
for vals in vals_list
# partner_ids is of the form [Command.set(ids)]
for partner_id in vals.get('partner_ids', [(None, None, [])])[0][2]
), lambda p: p[0]):
partner = self.env['res.partner'].browse(partner)
for _, context, repository in contexts:
repository = self.env['runbot_merge.repository'].browse(repository)
partner._message_log(body=f"added override rights for {context!r} on {repository.name}")
return super().create(vals_list)
def write(self, vals):
new = None
if pids := vals.get('partner_ids'):
new = self.env['res.partner'].browse(pids[0][2])
if new is not None:
for o in self:
added = new - o.partner_ids
removed = o.partner_ids - new
for p in added:
p._message_log(body=f"added override rights for {o.context!r} on {o.repository_id.name}")
for r in removed:
r._message_log(body=f"removed override rights for {o.context!r} on {o.repository_id.name}")
return super().write(vals)
def unlink(self):
for o in self:
for p in o.partner_ids:
p._message_log(body=f"removed override rights for {o.context!r} on {o.repository_id.name}")
return super().unlink()
@api.model
def name_search(self, name='', args=None, operator='ilike', limit=100):
return self.search((args or []) + [
@ -110,8 +215,10 @@ class OverrideRights(models.Model):
('repository_id.name', operator, name)
], limit=limit).name_get()
def name_get(self):
return [
(r.id, f'{r.repository_id.name}: {r.context}' if r.repository_id else r.context)
for r in self
]
@api.depends('repository_id.name', 'context')
def _compute_display_name(self):
for r in self:
if r.repository_id:
r.display_name = f'{r.repository_id.name}: {r.context}'
else:
r.display_name = r.context

View File

@ -0,0 +1,716 @@
import base64
import contextlib
import dataclasses
import io
import json
import logging
import os
import re
from collections.abc import Mapping
from difflib import Differ
from operator import itemgetter
from typing import Dict, Union, Optional, Literal, Callable, Iterator, Tuple, List, TypeAlias
from werkzeug.datastructures import Headers
from odoo import api, models, fields, Command
from odoo.tools import OrderedSet, groupby
from .pull_requests import Branch, Stagings, PullRequests, Repository
from .batch import Batch
from .. import exceptions, utils, github, git
WAIT_FOR_VISIBILITY = [10, 10, 10, 10]
_logger = logging.getLogger(__name__)
class Project(models.Model):
_inherit = 'runbot_merge.project'
@dataclasses.dataclass(slots=True)
class StagingSlice:
"""Staging state for a single repository:
- gh is a cache for the github proxy object (contains a session for reusing
connection)
- head is the current staging head for the branch of that repo
- working_copy is the local working copy for the staging for that repo
"""
gh: github.GH
head: str
repo: git.Repo
StagingState: TypeAlias = Dict[Repository, StagingSlice]
def try_staging(branch: Branch) -> Optional[Stagings]:
""" Tries to create a staging if the current branch does not already
have one. Returns None if the branch already has a staging or there
is nothing to stage, the newly created staging otherwise.
"""
_logger.info(
"Checking %s (%s) for staging: %s, skip? %s",
branch, branch.name,
branch.active_staging_id,
bool(branch.active_staging_id)
)
if branch.active_staging_id:
return None
def log(label: str, batches: Batch) -> None:
_logger.info(label, ', '.join(batches.mapped('prs.display_name')))
alone, batches = ready_batches(for_branch=branch)
if alone:
log("staging high-priority PRs %s", batches)
elif branch.project_id.staging_priority == 'default':
if split := branch.split_ids[:1]:
batches = split.batch_ids
split.unlink()
log("staging split PRs %s (prioritising splits)", batches)
else:
# priority, normal; priority = sorted ahead of normal, so always picked
# first as long as there's room
log("staging ready PRs %s (prioritising splits)", batches)
elif branch.project_id.staging_priority == 'ready':
if batches:
log("staging ready PRs %s (prioritising ready)", batches)
else:
split = branch.split_ids[:1]
batches = split.batch_ids
split.unlink()
log("staging split PRs %s (prioritising ready)", batches)
else:
assert branch.project_id.staging_priority == 'largest'
maxsplit = max(branch.split_ids, key=lambda s: len(s.batch_ids), default=branch.env['runbot_merge.split'])
_logger.info("largest split = %d, ready = %d", len(maxsplit.batch_ids), len(batches))
# bias towards splits if len(ready) == len(batch_ids)
if len(maxsplit.batch_ids) >= len(batches):
batches = maxsplit.batch_ids
maxsplit.unlink()
log("staging split PRs %s (prioritising largest)", batches)
else:
log("staging ready PRs %s (prioritising largest)", batches)
if not batches:
return
original_heads, staging_state = staging_setup(branch, batches)
staged = stage_batches(branch, batches, staging_state)
if not staged:
return None
env = branch.env
heads = []
commits = []
for repo, it in staging_state.items():
if it.head == original_heads[repo] and branch.project_id.uniquifier:
# if we didn't stage anything for that repo and uniquification is
# enabled, create a dummy commit with a uniquifier so we don't hit
# a previous version of the same staging, ensuring the staging head
# is new and we're building everything
project = branch.project_id
uniquifier = base64.b64encode(os.urandom(12)).decode('ascii')
dummy_head = it.repo.with_config(check=True).commit_tree(
# somewhat exceptionally, `commit-tree` wants an actual tree
# not a tree-ish
tree=f'{it.head}^{{tree}}',
parents=[it.head],
author=(project.github_name, project.github_email),
message=f'''\
force rebuild
uniquifier: {uniquifier}
For-Commit-Id: {it.head}
''',
).stdout.strip()
# see above, ideally we don't need to mark the real head as
# `to_check` because it's an old commit but `DO UPDATE` is necessary
# for `RETURNING` to work, and it doesn't really hurt (maybe)
env.cr.execute(
"INSERT INTO runbot_merge_commit (sha, to_check, statuses) "
"VALUES (%s, false, '{}'), (%s, true, '{}') "
"ON CONFLICT (sha) DO UPDATE SET to_check=true "
"RETURNING id",
[it.head, dummy_head]
)
([commit], [head]) = env.cr.fetchall()
it.head = dummy_head
else:
# otherwise just create a record for that commit, or flag existing
# one as to-recheck in case there are already statuses we want to
# propagate to the staging or something
env.cr.execute(
"INSERT INTO runbot_merge_commit (sha, to_check, statuses) "
"VALUES (%s, true, '{}') "
"ON CONFLICT (sha) DO UPDATE SET to_check=true "
"RETURNING id",
[it.head]
)
[commit] = [head] = env.cr.fetchone()
heads.append(fields.Command.create({
'repository_id': repo.id,
'commit_id': head,
}))
commits.append(fields.Command.create({
'repository_id': repo.id,
'commit_id': commit,
}))
# create actual staging object
st: Stagings = env['runbot_merge.stagings'].create({
'target': branch.id,
'staging_batch_ids': [Command.create({'runbot_merge_batch_id': batch.id}) for batch in staged],
'heads': heads,
'commits': commits,
})
for repo, it in staging_state.items():
_logger.info(
"%s: create staging for %s:%s at %s",
branch.project_id.name, repo.name, branch.name,
it.head
)
it.repo.stdout(False).check(True).push(
'-f',
git.source_url(repo),
f'{it.head}:refs/heads/staging.{branch.name}',
)
_logger.info("Created staging %s (%s) to %s", st, ', '.join(
'%s[%s]' % (batch, batch.prs)
for batch in staged
), st.target.name)
return st
def ready_batches(for_branch: Branch) -> Tuple[bool, Batch]:
env = for_branch.env
# splits are ready by definition, we need to exclude them from the ready
# rows, otherwise if a prioritised (alone) PR is part of a split it'll be
# staged through priority *and* through split.
split_ids = for_branch.split_ids.batch_ids.ids
env.cr.execute("""
SELECT max(priority)
FROM runbot_merge_batch
WHERE blocked IS NULL AND target = %s AND NOT id = any(%s)
""", [for_branch.id, split_ids])
alone = env.cr.fetchone()[0] == 'alone'
return (
alone,
env['runbot_merge.batch'].search([
('target', '=', for_branch.id),
('blocked', '=', False),
('priority', '=', 'alone') if alone else (1, '=', 1),
('id', 'not in', split_ids),
], order="priority DESC, id ASC"),
)
def staging_setup(
target: Branch,
batches: Batch,
) -> Tuple[Dict[Repository, str], StagingState]:
"""Sets up the staging:
- stores baseline info
- creates tmp branch via gh API (to remove)
- generates working copy for each repository with the target branch
"""
by_repo: Mapping[Repository, List[PullRequests]] = \
dict(groupby(batches.prs, lambda p: p.repository))
staging_state = {}
original_heads = {}
for repo in target.project_id.repo_ids.having_branch(target):
gh = repo.github()
head = gh.head(target.name)
source = git.get_local(repo)
source.fetch(
git.source_url(repo),
# a full refspec is necessary to ensure we actually fetch the ref
# (not just the commit it points to) and update it.
# `git fetch $remote $branch` seems to work locally, but it might
# be hooked only to "proper" remote-tracking branches
# (in `refs/remotes`), it doesn't seem to work here
f'+refs/heads/{target.name}:refs/heads/{target.name}',
*(pr.head for pr in by_repo.get(repo, []))
)
original_heads[repo] = head
staging_state[repo] = StagingSlice(gh=gh, head=head, repo=source.stdout().with_config(text=True, check=False))
return original_heads, staging_state
def stage_batches(branch: Branch, batches: Batch, staging_state: StagingState) -> Stagings:
batch_limit = branch.project_id.batch_limit
env = branch.env
staged = env['runbot_merge.batch']
for batch in batches:
if len(staged) >= batch_limit:
break
try:
staged |= stage_batch(env, batch, staging_state)
except exceptions.MergeError as e:
pr = e.args[0]
_logger.info("Failed to stage %s into %s", pr.display_name, branch.name)
pr._message_log(body=f"Failed to stage into {branch.name}: {e}")
if not staged or isinstance(e, exceptions.Unmergeable):
if len(e.args) > 1 and e.args[1]:
reason = e.args[1]
else:
reason = e.__cause__ or e.__context__
# if the reason is a json document, assume it's a github error
# and try to extract the error message to give it to the user
with contextlib.suppress(Exception):
reason = json.loads(str(reason))['message'].lower()
pr.error = True
env.ref('runbot_merge.pr.merge.failed')._send(
repository=pr.repository,
pull_request=pr.number,
format_args={'pr': pr, 'reason': reason, 'exc': e},
)
return staged
refline = re.compile(rb'([\da-f]{40}) ([^\0\n]+)(\0.*)?\n?')
ZERO_REF = b'0'*40
def parse_refs_smart(read: Callable[[int], bytes]) -> Iterator[Tuple[str, str]]:
""" yields pkt-line data (bytes), or None for flush lines """
def read_line() -> Optional[bytes]:
length = int(read(4), 16)
if length == 0:
return None
return read(length - 4)
header = read_line()
assert header and header.rstrip() == b'# service=git-upload-pack', header
assert read_line() is None, "failed to find first flush line"
# read lines until second delimiter
for line in iter(read_line, None):
if line.startswith(ZERO_REF):
break # empty list (no refs)
m = refline.fullmatch(line)
assert m
yield m[1].decode(), m[2].decode()
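# pkt-line refresher (for the smart-http ref advertisement parsed above):
# each line is a 4-digit hex length prefix (the length includes the
# prefix itself) followed by the payload, and "0000" is a flush packet,
# e.g. b'001e# service=git-upload-pack\n' has length 0x001e == 30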
UNCHECKABLE = ['merge_method', 'overrides', 'draft']
def stage_batch(env: api.Environment, batch: Batch, staging: StagingState):
"""Stages the batch represented by the ``prs`` recordset, onto the
current corresponding staging heads.
Alongside returning the newly created batch, updates ``staging[*].head``
in-place on success. On failure, the heads should not be touched.
May return an empty recordset on some non-fatal failures.
"""
new_heads: Dict[PullRequests, str] = {}
pr_fields = env['runbot_merge.pull_requests']._fields
for pr in batch.prs:
info = staging[pr.repository]
_logger.info(
"Staging pr %s for target %s; method=%s",
pr.display_name, pr.target.name,
pr.merge_method or (pr.squash and 'single') or None
)
try:
method, new_heads[pr] = stage(pr, info, related_prs=(batch.prs - pr))
_logger.info(
"Staged pr %s to %s by %s: %s -> %s",
pr.display_name, pr.target.name, method,
info.head, new_heads[pr]
)
except github.MergeError as e:
raise exceptions.MergeError(pr) from e
except exceptions.Mismatch as e:
diff = ''.join(Differ().compare(
list(format_for_difflib((n, v) for n, v, _ in e.args[1])),
list(format_for_difflib((n, v) for n, _, v in e.args[1])),
))
_logger.info("Failed to stage %s: data mismatch", pr.display_name)
pr._message_log(body=f"data mismatch before merge:\n{diff}")
env.ref('runbot_merge.pr.staging.mismatch')._send(
repository=pr.repository,
pull_request=pr.number,
format_args={
'pr': pr,
'mismatch': ', '.join(pr_fields[f].string for f in e.args[0]),
'diff': diff,
'unchecked': ', '.join(pr_fields[f].string for f in UNCHECKABLE)
}
)
return env['runbot_merge.batch']
# update meta to new heads
for pr, head in new_heads.items():
staging[pr.repository].head = head
return batch
def format_for_difflib(items: Iterator[Tuple[str, object]]) -> Iterator[str]:
""" Bit of a pain in the ass because difflib really wants
all lines to be newline-terminated, but not all values are
actual lines, and also needs to split multiline values.
"""
for name, value in items:
yield name + ':\n'
value = str(value)
if not value.endswith('\n'):
value += '\n'
yield from value.splitlines(keepends=True)
yield '\n'
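# e.g. list(format_for_difflib([('Message', 'a\nb')]))
# -> ['Message:\n', 'a\n', 'b\n', '\n']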
Method = Literal['merge', 'rebase-merge', 'rebase-ff', 'squash']
def stage(pr: PullRequests, info: StagingSlice, related_prs: PullRequests) -> Tuple[Method, str]:
# nb: pr_commits is oldest to newest so pr.head is pr_commits[-1]
_, prdict = info.gh.pr(pr.number)
commits = prdict['commits']
method: Method = pr.merge_method or ('rebase-ff' if commits == 1 else None)
if commits > 50 and method.startswith('rebase'):
raise exceptions.Unmergeable(pr, "Rebasing 50 commits is too much.")
if commits > 250:
raise exceptions.Unmergeable(
pr, "Merging PRs of 250 or more commits is not supported "
"(https://developer.github.com/v3/pulls/#list-commits-on-a-pull-request)"
)
pr_commits = info.gh.commits(pr.number)
for c in pr_commits:
if not (c['commit']['author']['email'] and c['commit']['committer']['email']):
raise exceptions.Unmergeable(
pr,
f"All commits must have author and committer email, "
f"missing email on {c['sha']} indicates the authorship is "
f"most likely incorrect."
)
# sync and signal possibly missed updates
invalid = {}
diff = []
pr_head = pr_commits[-1]['sha']
if pr.head != pr_head:
invalid['head'] = pr_head
diff.append(('Head', pr.head, pr_head))
if pr.target.name != prdict['base']['ref']:
branch = pr.env['runbot_merge.branch'].with_context(active_test=False).search([
('name', '=', prdict['base']['ref']),
('project_id', '=', pr.repository.project_id.id),
])
if not branch:
pr.unlink()
raise exceptions.Unmergeable(pr, "While staging, found this PR had been retargeted to an un-managed branch.")
invalid['target'] = branch.id
diff.append(('Target branch', pr.target.name, branch.name))
if pr.squash != (commits == 1):
invalid['squash'] = commits == 1
diff.append(('Single commit', pr.squash, commits == 1))
msg = utils.make_message(prdict)
if pr.message != msg:
invalid['message'] = msg
diff.append(('Message', pr.message, msg))
if invalid:
pr.write({**invalid, 'reviewed_by': False, 'head': pr_head})
raise exceptions.Mismatch(invalid, diff)
if pr.reviewed_by and pr.reviewed_by.name == pr.reviewed_by.github_login:
# XXX: find other trigger(s) to sync github name?
gh_name = info.gh.user(pr.reviewed_by.github_login)['name']
if gh_name:
pr.reviewed_by.name = gh_name
match method:
case 'merge':
fn = stage_merge
case 'rebase-merge':
fn = stage_rebase_merge
case 'rebase-ff':
fn = stage_rebase_ff
case 'squash':
fn = stage_squash
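# sanity check: a PR which actually changes something (its head tree
# differs from its base tree) but whose merge leaves the staging tree
# unchanged was most likely already merged, so it should not be staged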
pr_base_tree = info.repo.get_tree(pr_commits[0]['parents'][0]['sha'])
pr_head_tree = pr_commits[-1]['commit']['tree']['sha']
merge_base_tree = info.repo.get_tree(info.head)
new_head = fn(pr, info, pr_commits, related_prs=related_prs)
merge_head_tree = info.repo.get_tree(new_head)
if pr_head_tree != pr_base_tree and merge_head_tree == merge_base_tree:
raise exceptions.MergeError(pr, 'results in an empty tree when merged, might be the duplicate of a merged PR.')
return method, new_head
def stage_squash(pr: PullRequests, info: StagingSlice, commits: List[github.PrCommit], related_prs: PullRequests) -> str:
msg = pr._build_message(pr, related_prs=related_prs)
authors = {
(c['commit']['author']['name'], c['commit']['author']['email'])
for c in commits
}
if len(authors) == 1:
author = authors.pop()
else:
msg.headers.extend(sorted(
('Co-Authored-By', "%s <%s>" % author)
for author in authors
))
author = (pr.repository.project_id.github_name, pr.repository.project_id.github_email)
committers = {
(c['commit']['committer']['name'], c['commit']['committer']['email'])
for c in commits
}
# should committers also be added to co-authors?
committer = committers.pop() if len(committers) == 1 else None
r = info.repo.merge_tree(info.head, pr.head)
if r.returncode:
raise exceptions.MergeError(pr, r.stderr)
merge_tree = r.stdout.strip()
r = info.repo.commit_tree(
tree=merge_tree,
parents=[info.head],
message=str(msg),
author=author,
committer=committer or author,
)
if r.returncode:
raise exceptions.MergeError(pr, r.stderr)
head = r.stdout.strip()
commits_map = {c['sha']: head for c in commits}
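    # by convention the empty key maps the PR as a whole to its staged head,
    # cf stage_merge / stage_rebase_* below which set commits_map[''] the same way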
commits_map[''] = head
pr.commits_map = json.dumps(commits_map)
return head
def stage_rebase_ff(pr: PullRequests, info: StagingSlice, commits: List[github.PrCommit], related_prs: PullRequests) -> str:
add_self_references(pr, commits, related_prs=related_prs, merge=commits[-1])
_logger.debug("rebasing %s on %s (commits=%s)",
pr.display_name, info.head, len(commits))
head, mapping = info.repo.rebase(info.head, commits=commits)
pr.commits_map = json.dumps({**mapping, '': head})
return head
def stage_rebase_merge(pr: PullRequests, info: StagingSlice, commits: List[github.PrCommit], related_prs: PullRequests) -> str:
add_self_references(pr, commits, related_prs=related_prs)
_logger.debug("rebasing %s on %s (commits=%s)",
pr.display_name, info.head, len(commits))
h, mapping = info.repo.rebase(info.head, commits=commits)
msg = pr._build_message(pr, related_prs=related_prs)
project = pr.repository.project_id
    merge_head = info.repo.merge(
info.head, h, str(msg),
author=(project.github_name, project.github_email),
)
pr.commits_map = json.dumps({**mapping, '': merge_head})
return merge_head
def stage_merge(pr: PullRequests, info: StagingSlice, commits: List[github.PrCommit], related_prs: PullRequests) -> str:
pr_head = commits[-1] # oldest to newest
base_commit = None
head_parents = {p['sha'] for p in pr_head['parents']}
if len(head_parents) > 1:
# look for parent(s?) of pr_head not in PR, means it's
# from target (so we merged target in pr)
merge = head_parents - {c['sha'] for c in commits}
external_parents = len(merge)
if external_parents > 1:
            raise exceptions.Unmergeable(
                pr,
                "The PR head can only have one parent from the base branch "
                "(not part of the PR itself), found %d: %s" % (
                    external_parents,
                    ', '.join(merge)
                ))
if external_parents == 1:
[base_commit] = merge
commits_map = {c['sha']: c['sha'] for c in commits}
if base_commit:
# replicate pr_head with base_commit replaced by
# the current head
t = info.repo.merge_tree(info.head, pr_head['sha'])
if t.returncode:
raise exceptions.MergeError(pr, t.stderr)
merge_tree = t.stdout.strip()
new_parents = [info.head] + list(head_parents - {base_commit})
msg = pr._build_message(pr_head['commit']['message'], related_prs=related_prs)
d2t = itemgetter('name', 'email', 'date')
c = info.repo.commit_tree(
tree=merge_tree,
parents=new_parents,
message=str(msg),
author=d2t(pr_head['commit']['author']),
committer=d2t(pr_head['commit']['committer']),
)
if c.returncode:
raise exceptions.MergeError(pr, c.stderr)
copy = c.stdout.strip()
# merge commit *and old PR head* map to the pr head replica
commits_map[''] = commits_map[pr_head['sha']] = copy
pr.commits_map = json.dumps(commits_map)
return copy
else:
# otherwise do a regular merge
msg = pr._build_message(pr)
project = pr.repository.project_id
merge_head = info.repo.merge(
info.head, pr.head, str(msg),
author=(project.github_name, project.github_email),
)
# and the merge commit is the normal merge head
commits_map[''] = merge_head
pr.commits_map = json.dumps(commits_map)
return merge_head
def is_mentioned(message: Union[PullRequests, str], pr: PullRequests, *, full_reference: bool = False) -> bool:
"""Returns whether ``pr`` is mentioned in ``message```
"""
if full_reference:
pattern = fr'\b{re.escape(pr.display_name)}\b'
else:
repository = pr.repository.name # .replace('/', '\\/')
pattern = fr'( |\b{repository})#{pr.number}\b'
return bool(re.search(pattern, message if isinstance(message, str) else message.message))
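# sketch of the expected behavior (assumed from the patterns above): for a PR
# whose display_name is "odoo/runbot#42", both "closes #42" and
# "closes odoo/runbot#42" count as mentions, while full_reference=True only
# accepts the fully-qualified "odoo/runbot#42" form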
def add_self_references(
pr: PullRequests,
commits: List[github.PrCommit],
related_prs: PullRequests,
merge: Optional[github.PrCommit] = None,
):
"""Adds a footer reference to ``self`` to all ``commits`` if they don't
already refer to the PR.
"""
for c in (c['commit'] for c in commits):
c['message'] = str(pr._build_message(
c['message'],
related_prs=related_prs,
merge=merge and c['url'] == merge['commit']['url'],
))
BREAK = re.compile(r'''
[ ]{0,3} # 0-3 spaces of indentation
# followed by a sequence of three or more matching -, _, or * characters,
# each followed optionally by any number of spaces or tabs
# so needs to start with a _, - or *, then have at least 2 more such
# interspersed with any number of spaces or tabs
([*_-])
([ \t]*\1){2,}
[ \t]*
''', flags=re.VERBOSE)
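# e.g. "***", "- - -" and "_ _ _" all fullmatch BREAK, while "--" (only two
# characters) or "*-*" (mixed characters, the backreference fails) do not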
SETEX_UNDERLINE = re.compile(r'''
[ ]{0,3} # no more than 3 spaces indentation
[-=]+ # a sequence of = characters or a sequence of - characters
[ ]* # any number of trailing spaces
# we don't care about "a line containing a single -" because we want to
# disambiguate SETEX headings from thematic breaks, and thematic breaks have
# 3+ -. Doesn't look like GH interprets `- - -` as a line so yay...
''', flags=re.VERBOSE)
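# e.g. "===", "---" or "  ----  " all fullmatch SETEX_UNDERLINE; note "---"
# also matches BREAK, which is exactly the ambiguity handled in from_message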
HEADER = re.compile('([A-Za-z-]+): (.*)')
class Message:
@classmethod
def from_message(cls, msg: Union[PullRequests, str]) -> 'Message':
in_headers = True
maybe_setex = None
# creating from PR message -> remove content following break
if isinstance(msg, str):
message, handle_break = (msg, False)
else:
message, handle_break = (msg.message, True)
headers = []
body: List[str] = []
# don't process the title (first line) of the commit message
lines = message.splitlines()
for line in reversed(lines[1:]):
if maybe_setex:
# NOTE: actually slightly more complicated: it's a SETEX heading
# only if preceding line(s) can be interpreted as a
# paragraph so e.g. a title followed by a line of dashes
# would indeed be a break, but this should be good enough
# for now, if we need more we'll need a full-blown
# markdown parser probably
if line: # actually a SETEX title -> add underline to body then process current
body.append(maybe_setex)
else: # actually break, remove body then process current
body = []
maybe_setex = None
if not line:
if not in_headers and body and body[-1]:
body.append(line)
continue
if handle_break and BREAK.fullmatch(line):
if SETEX_UNDERLINE.fullmatch(line):
maybe_setex = line
else:
body = []
continue
h = HEADER.fullmatch(line)
if h:
# c-a-b = special case from an existing test, not sure if actually useful?
if in_headers or h[1].lower() == 'co-authored-by':
headers.append(h.groups())
continue
body.append(line)
in_headers = False
# if there are non-title body lines, add a separation after the title
if body and body[-1]:
body.append('')
body.append(lines[0])
return cls('\n'.join(reversed(body)), Headers(reversed(headers)))
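    # sketch: Message.from_message("Title\n\nbody\n\nSigned-off-by: A <a@example.com>")
    # yields a Message with body "Title\n\nbody" and a single
    # ('Signed-off-by', 'A <a@example.com>') header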
def __init__(self, body: str, headers: Optional[Headers] = None):
self.body = body
self.headers = headers or Headers()
def __setattr__(self, name, value):
# make sure stored body is always stripped
if name == 'body':
value = value and value.strip()
super().__setattr__(name, value)
def __str__(self):
if not self.headers:
return self.body.rstrip() + '\n'
with io.StringIO() as msg:
msg.write(self.body.rstrip())
msg.write('\n\n')
# https://git.wiki.kernel.org/index.php/CommitMessageConventions
# seems to mostly use capitalised names (rather than title-cased)
keys = list(OrderedSet(k.capitalize() for k in self.headers.keys()))
# c-a-b must be at the very end otherwise github doesn't see it
keys.sort(key=lambda k: k == 'Co-authored-by')
for k in keys:
for v in self.headers.getlist(k):
msg.write(k)
msg.write(': ')
msg.write(v)
msg.write('\n')
return msg.getvalue()
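    # sketch: with headers [('signed-off-by', ...), ('co-authored-by', ...)],
    # __str__ emits Signed-off-by before Co-authored-by, since per the sort
    # above the latter must come last for github to pick it up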


@ -0,0 +1,201 @@
import logging
from contextvars import ContextVar
from typing import Tuple
from xml.etree.ElementTree import Element, tostring
import markdown.inlinepatterns
import markdown.treeprocessors
from markupsafe import escape, Markup
def enum(model: str, field: str) -> Tuple[str, str]:
n = f'{model.replace(".", "_")}_{field}_type'
return n, n
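# sketch: enum('runbot_merge.project', 'freeze') returns the pair
# ('runbot_merge_project_freeze_type', 'runbot_merge_project_freeze_type'),
# i.e. the derived database enum type name, twice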
def readonly(_):
raise TypeError("Field is readonly")
DFM_CONTEXT_REPO = ContextVar("dfm_context", default="")
def dfm(repository: str, text: str) -> Markup:
""" Converts the input text from markup to HTML using the Odoo PR
Description Rules, which are basically:
- GFM
- minus raw HTML (?)
- + github's autolinking (https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/autolinked-references-and-urls)
- + bespoke autolinking of OPW and Task links to odoo.com
"""
t = DFM_CONTEXT_REPO.set(repository)
try:
return Markup(dfm_renderer.convert(escape(text)))
finally:
DFM_CONTEXT_REPO.reset(t)
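# e.g. (per test_gh_issue_links below) dfm("odoo/runbot", "thing thing #26")
# renders '<p>thing thing <a href="https://github.com/odoo/runbot/issues/26">#26</a></p>'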
class DfmExtension(markdown.extensions.Extension):
def extendMarkdown(self, md):
md.registerExtensions(['fenced_code', 'footnotes', 'nl2br', 'sane_lists', 'tables'], configs={})
md.inlinePatterns.register(GithubLinking(md), 'githublinking', 123)
md.inlinePatterns.register(OdooLinking(md), 'odoolinking', 124)
# ideally the unlinker should run before the prettifier so the
# prettification is done correctly, but it seems unlikely the prettifier
# handles the variable nature of links correctly, and we likely want to
# run after the unescaper
md.treeprocessors.register(Unlinker(), "unlinker", -10)
class GithubLinking(markdown.inlinepatterns.InlineProcessor):
"""Aside from being *very* varied github links are *contextual*. That is,
their resolution depends on the repository they're being called from
(technically they also need all the information from the github backend to
know the people & objects exist but we don't have that option).
Context is not available to us, but we can fake it through the application
of contextvars: ``DFM_CONTEXT_REPO`` should contain the full name of the
repository this is being resolved from.
If ``DFM_CONTEXT_REPO`` is empty and needed, this processor emits a warning.
"""
def __init__(self, md=None):
super().__init__(r"""(?xi)
(?:
\bhttps://github.com/([\w\.-]+/[\w\.-]+)/(?:issues|pull)/(\d+)(\#[\w-]+)?
| \bhttps://github.com/([\w\.-]+/[\w\.-]+)/commit/([a-f0-9]+)
| \b([\w\.-]+/[\w\.-]+)\#(\d+)
| (\bGH-|(?:^|(?<=\s))\#)(\d+)
| \b(?:
# user@sha or user/repo@sha
([\w\.-]+(?:/[\w\.-]+)?)
@
([0-9a-f]{7,40})
)
| \b(
# a sha is 7~40 hex digits but that means any million+ number matches
# which is probably wrong. So ensure there's at least one letter in the
# set by using a positive lookahead which looks for a sequence of at
# least 0 numbers followed by a-f
(?=[0-9]{0,39}?[a-f])
[0-9a-f]{7,40}
)
)
\b
""", md)
def handleMatch(self, m, data):
ctx = DFM_CONTEXT_REPO.get()
if not ctx:
logging.getLogger(__name__)\
.getChild("github_links")\
.warning("missing context for rewriting github links, skipping")
return m[0], *m.span()
repo = issue = commit = None
if m[2]: # full issue / PR
repo = m[1]
issue = m[2]
elif m[5]: # long hash
repo = m[4]
commit = m[5]
elif m[7]: # short issue with repo
repo = m[6]
issue = m[7]
elif m[9]: # short issue without repo
repo = None if m[8] == '#' else "GH"
issue = m[9]
elif m[11]: # medium hash
repo = m[10]
commit = m[11]
else: # hash only
commit = m[12]
el = Element("a")
if issue is not None:
if repo == "GH":
el.text = f"GH-{issue}"
repo = ctx
elif repo in (None, ctx):
repo = ctx
el.text = f"#{issue}"
else:
el.text = f"{repo}#{issue}"
if (fragment := m[3]) and fragment.startswith('#issuecomment-'):
el.text += ' (comment)'
else:
fragment = ''
el.set('href', f"https://github.com/{repo}/issues/{issue}{fragment}")
else:
if repo in (None, ctx):
label_repo = ""
repo = ctx
elif '/' not in repo: # owner-only
label_repo = repo
# NOTE: I assume in reality we're supposed to find the actual fork if unambiguous...
repo = repo + '/' + ctx.split('/')[-1]
elif repo.split('/')[-1] == ctx.split('/')[-1]:
# NOTE: here we assume if it's the same repo in a different owner it's a fork
label_repo = repo.split('/')[0]
else:
label_repo = repo
el.text = f"{label_repo}@{commit}" if label_repo else commit
el.set("href", f"https://github.com/{repo}/commit/{commit}")
return el, *m.span()
class OdooLinking(markdown.inlinepatterns.InlineProcessor):
def __init__(self, md=None):
# there are other weirder variations but fuck em, this matches
# "opw", "task", "task-id" or "taskid" followed by an optional - or :
# followed by digits
super().__init__(r"(?i)\b(task(?:-?id)?|opw)\s*[-:]?\s*(\d+)\b", md)
def handleMatch(self, m, data):
el = Element("a", href='https://www.odoo.com/web#model=project.task&id=' + m[2])
if m[1].lower() == 'opw':
el.text = f"opw-{m[2]}"
else:
el.text = f"task-{m[2]}"
return el, *m.span()
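# e.g. (per test_odoo_links below) dfm("", "OPW-42") renders a link labelled
# "opw-42" pointing to https://www.odoo.com/web#model=project.task&id=42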
class Unlinker(markdown.treeprocessors.Treeprocessor):
def run(self, root):
# find all elements which contain a link, as ElementTree does not have
# parent links we can't really replace links in place
for parent in root.iterfind('.//*[a]'):
children = parent[:]
# can't use clear because that clears the attributes and tail/text
del parent[:]
for el in children:
if el.tag != 'a' or el.get('href', '').startswith(('https:', 'http:')):
parent.append(el)
continue
# this is a weird link, remove it
if el.text: # first attach its text to the previous element
if len(parent): # prev is not parent
parent[-1].tail = (parent[-1].tail or '') + el.text
else:
parent.text = (parent.text or '') + el.text
if len(el): # then unpack all its children
parent.extend(el[:])
if el.tail: # then attach tail to previous element
if len(parent): # prev is not parent
parent[-1].tail = (parent[-1].tail or '') + el.tail
else:
parent.text = (parent.text or '') + el.tail
return None
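# e.g. (per test_ignore_tel below) "[nope](tel:+1-212-555-0100)" comes out as
# plain "nope": the anchor element is unwrapped but its text is kept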
# alternatively, use cmarkgfm? The maintainer of py-gfm (impl'd over
# python-markdown) ultimately gave up, if apparently mostly due to pymarkdown's
# tendency to break its API all the time
dfm_renderer = markdown.Markdown(
extensions=[DfmExtension()],
output_format='html5',
)


@ -9,15 +9,20 @@ access_runbot_merge_repository_status_admin,Admin access to repo statuses,model_
access_runbot_merge_branch_admin,Admin access to branches,model_runbot_merge_branch,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_pull_requests_admin,Admin access to PR,model_runbot_merge_pull_requests,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_pull_requests_tagging_admin,Admin access to tagging,model_runbot_merge_pull_requests_tagging,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_pull_requests_split_admin,Admin access to batch split wizard,model_runbot_merge_pull_requests_split_off,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_commit_admin,Admin access to commits,model_runbot_merge_commit,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_stagings_admin,Admin access to stagings,model_runbot_merge_stagings,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_stagings_heads_admin,Admin access to staging heads,model_runbot_merge_stagings_heads,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_stagings_commits_admin,Admin access to staging commits,model_runbot_merge_stagings_commits,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_stagings_cancel_admin,Admin access to cancelling stagings,model_runbot_merge_stagings_cancel,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_split_admin,Admin access to splits,model_runbot_merge_split,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_batch_admin,Admin access to batches,model_runbot_merge_batch,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_staging_batch_admin,Admin access to batch/staging link,model_runbot_merge_staging_batch,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_fetch_job_admin,Admin access to fetch jobs,model_runbot_merge_fetch_job,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_pull_requests_feedback_admin,Admin access to feedback,model_runbot_merge_pull_requests_feedback,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_review_rights,Admin access to review permissions,model_res_partner_review,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_review_override,Admin access to override permissions,model_res_partner_override,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_events_sources,Admin access to event sources,model_runbot_merge_events_sources,runbot_merge.group_admin,1,1,1,1
access_runbot_merge_project,User access to project,model_runbot_merge_project,base.group_user,1,0,0,0
access_runbot_merge_repository,User access to repo,model_runbot_merge_repository,base.group_user,1,0,0,0
access_runbot_merge_branch,User access to branches,model_runbot_merge_branch,base.group_user,1,0,0,0
@ -25,3 +30,5 @@ access_runbot_merge_pull_requests,User access to PR,model_runbot_merge_pull_requ
access_runbot_merge_pull_requests_feedback,Users have no reason to access feedback,model_runbot_merge_pull_requests_feedback,,0,0,0,0
access_runbot_merge_review_rights_2,Users can see partners,model_res_partner_review,base.group_user,1,0,0,0
access_runbot_merge_review_override_2,Users can see partners,model_res_partner_override,base.group_user,1,0,0,0
runbot_merge.access_runbot_merge_pull_requests_feedback_template,access_runbot_merge_pull_requests_feedback_template,runbot_merge.model_runbot_merge_pull_requests_feedback_template,base.group_system,1,1,0,0


@ -5,4 +5,7 @@
<record model="res.groups" id="base.group_system">
<field name="implied_ids" eval="[(4, ref('runbot_merge.group_admin'))]"/>
</record>
<record model="res.groups" id="status">
<field name="name">Mergebot Status Sender</field>
</record>
</odoo>

runbot_merge/sentry.py

@ -0,0 +1,117 @@
import logging
from os import environ
import sentry_sdk
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
from odoo import http
from odoo.addons.base.models.ir_cron import ir_cron
from odoo.http import HttpDispatcher, JsonRPCDispatcher
from .exceptions import FastForwardError, MergeError, Unmergeable
def delegate(self, attr):
return getattr(self.app, attr)
SentryWsgiMiddleware.__getattr__ = delegate
def enable_sentry():
logger = logging.getLogger('runbot_merge')
dsn = environ.get('SENTRY_DSN')
if not dsn:
logger.info("No DSN found, skipping sentry...")
return
try:
setup_sentry(dsn)
except Exception:
logger.exception("DSN found, failed to enable sentry...")
else:
logger.info("DSN found, sentry enabled...")
def setup_sentry(dsn):
sentry_sdk.init(
dsn,
auto_session_tracking=False,
# traces_sample_rate=1.0,
integrations=[
# note: if the colorformatter is enabled, sentry gets lost
# and classifies everything as errors because it fails to
# properly classify levels as the colorformatter injects
# the ANSI color codes right into LogRecord.levelname
LoggingIntegration(level=logging.INFO, event_level=logging.WARNING),
],
before_send=event_filter,
# apparently not in my version of the sdk
# functions_to_trace = []
)
http.root = SentryWsgiMiddleware(http.root)
instrument_odoo()
def instrument_odoo():
"""Monkeypatches odoo core to copy odoo metadata into sentry for more
informative events
"""
# add user to wsgi request context
for d in [HttpDispatcher, JsonRPCDispatcher]:
def dispatch(self, endpoint, args, old_dispatch=d.dispatch):
if self.request.uid:
sentry_sdk.set_user({
'id': self.request.uid,
'email': self.request.env.user.email,
'username': self.request.env.user.login,
})
else:
sentry_sdk.set_user({'username': '<public>'})
return old_dispatch(self, endpoint, args)
d.dispatch = dispatch
# create transaction for tracking crons, add user to that
old_callback = ir_cron._callback
def _callback(self, cron_name, server_action_id, job_id):
sentry_sdk.start_transaction(name=f"cron {cron_name}")
sentry_sdk.set_user({
'id': self.env.user.id,
'email': self.env.user.email,
'username': self.env.user.login,
})
return old_callback(self, cron_name, server_action_id, job_id)
ir_cron._callback = _callback
dummy_record = logging.LogRecord(name="", level=logging.NOTSET, pathname='', lineno=0, msg='', args=(), exc_info=None)
# mapping of exception types to predicates, if the predicate returns `True` the
# exception event should be suppressed
SUPPRESS_EXCEPTION = {
# Someone else deciding to push directly to the branch (which is generally
# what leads to this error) is not really actionable.
#
# Other possibilities are more structural and thus we probably want to know:
# - other 422 Unprocessable github errors (likely config issues):
# - reference does not exist
# - object does not exist
# - object is not a commit
# - branch protection issue
# - timeout on ref update (github probably dying)
# - other HTTP error (also github probably dying)
#
# might be worth using richer exceptions to make this clearer, and easier to classify
FastForwardError: lambda e: 'not a fast forward' in str(e.__cause__),
# Git conflict when merging (or non-json response which is weird),
# notified on PR
MergeError: lambda _: True,
# Failed preconditions on merging, notified on PR
Unmergeable: lambda _: True,
}
def event_filter(event, hint):
# event['level'], event['logger'], event['logentry'], event['exception']
    # known hints: log_record: LogRecord, exc_info: (type, BaseException, Traceback) | None
exc_info = hint.get('exc_info') or hint.get('log_record', dummy_record).exc_info
if exc_info:
etype, exc, _ = exc_info
        if SUPPRESS_EXCEPTION.get(etype, lambda _: False)(exc):
            return None
    return event


@ -14,27 +14,30 @@ h1, h2, h3, h4, h5, h6{
margin-bottom: 0.33em;
}
h5 { font-size: 1em; }
.bg-success, .bg-info, .bg-warning, .bg-danger, .bg-gray-lighter {
.bg-success, .bg-info, .bg-warning, .bg-danger, .bg-gray-lighter,
.table-success, .table-info, .table-warning, .table-danger {
color: inherit;
}
.dropdown-item, .dropdown-menu, .dropdown-menu a {
color: inherit;
}
.bg-success {
background-color: #dff0d8 !important;
$mergebot-colors: ("success": #dff0d8, "danger": #f2dede, "warning": #fcf8e3, "info": #d9edf7);
@each $category, $color in $mergebot-colors {
.bg-#{$category} {
background-color: $color !important;
}
.table-#{$category} {
background-color: $color !important;
&.table-active {
background-color: scale-color($color, $lightness: -5%) !important;
}
}
}
.bg-unmerged {
background-color: #dcefe8 !important
}
.bg-info {
background-color: #d9edf7 !important;
}
.bg-warning {
background-color: #fcf8e3 !important;
}
.bg-danger {
background-color: #f2dede !important;
}
.list-inline {
margin-bottom: 10px;
}
@ -79,6 +82,11 @@ h5 { font-size: 1em; }
.batch a:not(:last-of-type) a:after {
content: ",";
}
button.dropdown-toggle {
text-align: left;
white-space: wrap;
}
}
.pr-listing > * { display: inline-block; }
.pr-awaiting { opacity: 0.8; }
@ -110,3 +118,27 @@ dl.runbot-merge-fields {
.staging-statuses {
cursor: wait;
}
/* forwardport */
.outstanding-partners > * {
@extend .pt-1;
// because there's a trailing space which is annoying to remove, which plays
// the role of padding-right
@extend .pl-1;
@extend .text-nowrap;
// works better for the left edge of the *box*
@extend .border-left;
}
// batches sequence table in PR dashboard: mostly uses (customised) bootstrap
// but some of the style is bespoke because inline styles don't work well with
// CSP
.closed {
text-decoration: line-through;
}
tr.inactive {
opacity: 0.5;
}
td.detached {
border-top: 2px solid map-get($theme-colors, "danger");
}


@ -1,36 +1,9 @@
import pytest
import requests
@pytest.fixture()
def module():
return 'runbot_merge'
@pytest.fixture
def page(port):
s = requests.Session()
def get(url):
r = s.get('http://localhost:{}{}'.format(port, url))
r.raise_for_status()
return r.content
return get
@pytest.fixture
def default_crons():
return [
# env['runbot_merge.project']._check_fetch()
'runbot_merge.fetch_prs_cron',
# env['runbot_merge.commit']._notify()
'runbot_merge.process_updated_commits',
# env['runbot_merge.project']._check_stagings()
'runbot_merge.merge_cron',
# env['runbot_merge.project']._create_stagings()
'runbot_merge.staging_cron',
# env['runbot_merge.pull_requests']._check_linked_prs_statuses()
'runbot_merge.check_linked_prs_status',
# env['runbot_merge.pull_requests.feedback']._send()
'runbot_merge.feedback_cron',
]
@pytest.fixture
def project(env, config):
return env['runbot_merge.project'].create({
@ -39,3 +12,30 @@ def project(env, config):
'github_prefix': 'hansen',
'branch_ids': [(0, 0, {'name': 'master'})],
})
@pytest.fixture
def make_repo2(env, project, make_repo, users, setreviewers):
"""Layer over ``make_repo`` which also:
- adds the new repo to ``project`` (with no group and the ``'default'`` status required)
- sets the standard reviewers on the repo
- and creates an event source for the repo
"""
def mr(name):
r = make_repo(name)
rr = env['runbot_merge.repository'].create({
'project_id': project.id,
'name': r.name,
'group_id': False,
'required_statuses': 'default',
})
setreviewers(rr)
env['runbot_merge.events_sources'].create({'repository': r.name})
return r
return mr
@pytest.fixture
def repo(make_repo2):
return make_repo2('repo')

File diff suppressed because it is too large


@ -0,0 +1,201 @@
"""This module tests edge cases specific to the batch objects themselves,
without wider relevance and thus no other location.
"""
import pytest
from utils import Commit, to_pr, pr_page
def test_close_single(env, repo):
"""If a batch has a single PR and that PR gets closed, the batch should be
inactive *and* blocked.
"""
with repo:
repo.make_commits(None, Commit("a", tree={"a": "a"}), ref='heads/master')
[c] = repo.make_commits('master', Commit('b', tree={"b": "b"}))
pr = repo.make_pr(head=c, target='master')
env.run_crons()
pr_id = to_pr(env, pr)
batch_id = pr_id.batch_id
assert pr_id.state == 'opened'
assert batch_id.blocked
Batches = env['runbot_merge.batch']
assert Batches.search_count([]) == 1
with repo:
pr.close()
assert pr_id.state == 'closed'
assert batch_id.all_prs == pr_id
assert batch_id.prs == pr_id.browse(())
assert batch_id.blocked == "all prs are closed"
assert not batch_id.active
assert Batches.search_count([]) == 0
def test_close_multiple(env, make_repo2):
Batches = env['runbot_merge.batch']
repo1 = make_repo2('wheee')
repo2 = make_repo2('wheeee')
with repo1:
repo1.make_commits(None, Commit("a", tree={"a": "a"}), ref='heads/master')
repo1.make_commits('master', Commit('b', tree={"b": "b"}), ref='heads/a_pr')
pr1 = repo1.make_pr(head='a_pr', target='master')
with repo2:
repo2.make_commits(None, Commit("a", tree={"a": "a"}), ref='heads/master')
repo2.make_commits('master', Commit('b', tree={"b": "b"}), ref='heads/a_pr')
pr2 = repo2.make_pr(head='a_pr', target='master')
pr1_id = to_pr(env, pr1)
pr2_id = to_pr(env, pr2)
batch_id = pr1_id.batch_id
assert pr2_id.batch_id == batch_id
assert pr1_id.state == 'opened'
assert pr2_id.state == 'opened'
assert batch_id.all_prs == pr1_id | pr2_id
assert batch_id.prs == pr1_id | pr2_id
assert batch_id.active
assert Batches.search_count([]) == 1
with repo1:
pr1.close()
assert pr1_id.state == 'closed'
assert pr2_id.state == 'opened'
assert batch_id.all_prs == pr1_id | pr2_id
assert batch_id.prs == pr2_id
assert batch_id.active
assert Batches.search_count([]) == 1
with repo2:
pr2.close()
assert pr1_id.state == 'closed'
assert pr2_id.state == 'closed'
assert batch_id.all_prs == pr1_id | pr2_id
assert batch_id.prs == env['runbot_merge.pull_requests'].browse(())
assert not batch_id.active
assert Batches.search_count([]) == 0
def test_inconsistent_target(env, project, make_repo2, users, page, config):
"""If a batch's PRs have inconsistent targets,
- only open PRs should count
- it should be clearly notified on the dash
- the dash should not get hopelessly lost
- there should be a wizard to split the batch / move a PR to a separate batch
"""
# region setup
Batches = env['runbot_merge.batch']
repo1 = make_repo2('whe')
repo2 = make_repo2('whee')
repo3 = make_repo2('wheee')
project.write({'branch_ids': [(0, 0, {'name': 'other'})]})
with repo1:
[m] = repo1.make_commits(None, Commit("a", tree={"a": "a"}), ref='heads/master')
repo1.make_ref('heads/other', m)
repo1.make_commits('master', Commit('b', tree={"b": "b"}), ref='heads/a_pr')
pr1 = repo1.make_pr(head='a_pr', target='master')
repo1.make_commits('master', Commit('b', tree={"c": "c"}), ref='heads/something_else')
pr_other = repo1.make_pr(head='something_else', target='master')
with repo2:
[m] = repo2.make_commits(None, Commit("a", tree={"a": "a"}), ref='heads/master')
repo2.make_ref("heads/other", m)
repo2.make_commits('master', Commit('b', tree={"b": "b"}), ref='heads/a_pr')
pr2 = repo2.make_pr(head='a_pr', target='master')
with repo3:
[m] = repo3.make_commits(None, Commit("a", tree={"a": "a"}), ref='heads/master')
repo3.make_ref("heads/other", m)
repo3.make_commits('master', Commit('b', tree={"b": "b"}), ref='heads/a_pr')
pr3 = repo3.make_pr(head='a_pr', target='master')
assert repo1.owner == repo2.owner == repo3.owner
owner = repo1.owner
# endregion
# region closeable consistency
[b] = Batches.search([('all_prs.label', '=', f'{owner}:a_pr')])
assert b.target.name == 'master'
assert len(b.prs) == 3
assert len(b.all_prs) == 3
with repo3:
pr3.base = 'other'
assert b.target.name == False
assert len(b.prs) == 3
assert len(b.all_prs) == 3
with repo3:
pr3.close()
assert b.target.name == 'master'
assert len(b.prs) == 2
assert len(b.all_prs) == 3
# endregion
# region split batch
pr1_id = to_pr(env, pr1)
pr2_id = to_pr(env, pr2)
with repo2:
pr2.base = 'other'
pr2_dashboard = pr_page(page, pr2)
# The dashboard should have an alert
s = pr2_dashboard.cssselect('.alert.alert-danger')
assert s, "the dashboard should have an alert"
assert s[0].text_content().strip() == f"""\
Inconsistent targets:
{pr1_id.display_name} has target 'master'
{pr2_id.display_name} has target 'other'\
"""
assert not pr2_dashboard.cssselect('table'), "the batches table should be suppressed"
assert b.target.name == False
assert to_pr(env, pr_other).label == f'{owner}:something_else'
# try staging
with repo1:
pr1.post_comment("hansen r+", config['role_reviewer']['token'])
repo1.post_status(pr1.head, "success")
with repo2:
pr2.post_comment("hansen r+", config['role_reviewer']['token'])
repo2.post_status(pr2.head, "success")
env.run_crons()
assert not pr1_id.blocked
assert not pr2_id.blocked
assert b.blocked == "Multiple target branches: 'other, master'"
assert env['runbot_merge.stagings'].search_count([]) == 0
act = pr2_id.button_split()
assert act['type'] == 'ir.actions.act_window'
assert act['views'] == [[False, 'form']]
assert act['target'] == 'new'
w = env[act['res_model']].browse([act['res_id']])
w.new_label = f"{owner}:something_else"
with pytest.raises(Exception):
w.button_apply()
w.new_label = f"{owner}:blah-blah-blah"
w.button_apply()
assert pr2_id.label == f"{owner}:blah-blah-blah"
assert pr2_id.batch_id != to_pr(env, pr1).batch_id
assert b.target.name == 'master'
assert len(b.prs) == 1, "the PR has been moved off of this batch entirely"
assert len(b.all_prs) == 2
# endregion
assert not pr1_id.blocked
assert not pr1_id.batch_id.blocked
assert not pr2_id.blocked
assert not pr2_id.batch_id.blocked
env.run_crons()
assert env['runbot_merge.stagings'].search_count([])


@ -2,28 +2,21 @@ import pytest
from utils import Commit
@pytest.fixture
def repo(env, project, make_repo, users, setreviewers):
r = make_repo('repo')
project.write({
'repo_ids': [(0, 0, {
'name': r.name,
'status_ids': [
(0, 0, {'context': 'ci'}),
# require the lint status on master
(0, 0, {
'context': 'lint',
'branch_filter': [('id', '=', project.branch_ids.id)]
}),
(0, 0, {'context': 'pr', 'stagings': False}),
(0, 0, {'context': 'staging', 'prs': False}),
]
})],
})
setreviewers(*project.repo_ids)
return r
def _setup_statuses(project, repo):
project.repo_ids.status_ids = [
(5, 0, 0),
(0, 0, {'context': 'ci'}),
# require the lint status on master
(0, 0, {
'context': 'lint',
'branch_filter': [('id', '=', project.branch_ids.id)]
}),
(0, 0, {'context': 'pr', 'stagings': False}),
(0, 0, {'context': 'staging', 'prs': False}),
]
@pytest.mark.usefixtures('_setup_statuses')
def test_status_applies(env, repo, config):
""" If branches are associated with a repo status, only those branch should
require the status on their PRs & stagings
@ -41,15 +34,15 @@ def test_status_applies(env, repo, config):
with repo:
repo.post_status(c, 'success', 'ci')
env.run_crons('runbot_merge.process_updated_commits')
env.run_crons(None)
assert pr_id.state == 'opened'
with repo:
repo.post_status(c, 'success', 'pr')
env.run_crons('runbot_merge.process_updated_commits')
env.run_crons(None)
assert pr_id.state == 'opened'
with repo:
repo.post_status(c, 'success', 'lint')
env.run_crons('runbot_merge.process_updated_commits')
env.run_crons(None)
assert pr_id.state == 'validated'
with repo:
@ -60,17 +53,18 @@ def test_status_applies(env, repo, config):
assert st.state == 'pending'
with repo:
repo.post_status('staging.master', 'success', 'ci')
env.run_crons('runbot_merge.process_updated_commits')
env.run_crons(None)
assert st.state == 'pending'
with repo:
repo.post_status('staging.master', 'success', 'lint')
env.run_crons('runbot_merge.process_updated_commits')
env.run_crons(None)
assert st.state == 'pending'
with repo:
repo.post_status('staging.master', 'success', 'staging')
env.run_crons('runbot_merge.process_updated_commits')
env.run_crons(None)
assert st.state == 'success'
@pytest.mark.usefixtures('_setup_statuses')
def test_status_skipped(env, project, repo, config):
""" Branches not associated with a repo status should not require the status
on their PRs or stagings
@ -90,11 +84,11 @@ def test_status_skipped(env, project, repo, config):
with repo:
repo.post_status(c, 'success', 'ci')
env.run_crons('runbot_merge.process_updated_commits')
env.run_crons(None)
assert pr_id.state == 'opened'
with repo:
repo.post_status(c, 'success', 'pr')
env.run_crons('runbot_merge.process_updated_commits')
env.run_crons(None)
assert pr_id.state == 'validated'
with repo:
@ -105,11 +99,11 @@ def test_status_skipped(env, project, repo, config):
assert st.state == 'pending'
with repo:
repo.post_status('staging.maintenance', 'success', 'staging')
env.run_crons('runbot_merge.process_updated_commits')
env.run_crons(None)
assert st.state == 'pending'
with repo:
repo.post_status('staging.maintenance', 'success', 'ci')
env.run_crons('runbot_merge.process_updated_commits')
env.run_crons(None)
assert st.state == 'success'
def test_pseudo_version_tag(env, project, make_repo, setreviewers, config):
@ -132,6 +126,7 @@ def test_pseudo_version_tag(env, project, make_repo, setreviewers, config):
],
})
setreviewers(*project.repo_ids)
env['runbot_merge.events_sources'].create({'repository': repo.name})
with repo:
[m] = repo.make_commits(None, Commit('c1', tree={'a': '1'}), ref='heads/master')


@ -0,0 +1,72 @@
from odoo.addons.runbot_merge.models.utils import dfm
def test_odoo_links():
assert dfm("", "OPW-42") == '<p><a href="https://www.odoo.com/web#model=project.task&amp;id=42">opw-42</a></p>'
assert dfm("", "taskid : 42") == '<p><a href="https://www.odoo.com/web#model=project.task&amp;id=42">task-42</a></p>'
assert dfm("", "I was doing task foo") == '<p>I was doing task foo</p>'
assert dfm("", "Task 687d3") == "<p>Task 687d3</p>"
def p(*content):
return f'<p>{"".join(content)}</p>'
def a(label, url):
return f'<a href="{url}">{label}</a>'
def test_gh_issue_links():
# same-repository link
assert dfm("odoo/runbot", "thing thing #26") == p("thing thing ", a('#26', 'https://github.com/odoo/runbot/issues/26'))
assert dfm("odoo/runbot", "GH-26") == p(a('GH-26', 'https://github.com/odoo/runbot/issues/26'))
assert dfm(
"odoo/runbot", "https://github.com/odoo/runbot/issues/26"
) == p(a('#26', 'https://github.com/odoo/runbot/issues/26'))
# cross-repo link
assert dfm(
"odoo/runbot", "jlord/sheetsee.js#26"
) == p(a('jlord/sheetsee.js#26', 'https://github.com/jlord/sheetsee.js/issues/26'))
assert dfm(
"odoo/runbot", "https://github.com/jlord/sheetsee.js/pull/26"
) == p(a('jlord/sheetsee.js#26', 'https://github.com/jlord/sheetsee.js/issues/26'))
# cross-repo link with comment
assert dfm(
"odoo/runbot", "https://github.com/odoo/odoo/pull/173061#issuecomment-2227874482"
) == p(a("odoo/odoo#173061 (comment)", "https://github.com/odoo/odoo/issues/173061#issuecomment-2227874482"))
def test_gh_commit_link():
# same repository
assert dfm(
"odoo/runbot", "https://github.com/odoo/runbot/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e"
) == p(a("a5c3785ed8d6a35868bc169f07e40e889087fd2e", "https://github.com/odoo/runbot/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e"))
# cross fork
assert dfm(
"odoo/runbot", "jlord@a5c3785ed8d6a35868bc169f07e40e889087fd2e"
) == p(a("jlord@a5c3785ed8d6a35868bc169f07e40e889087fd2e", "https://github.com/jlord/runbot/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e"))
assert dfm(
"odoo/runbot", "https://github.com/jlord/runbot/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e"
) == p(a("jlord@a5c3785ed8d6a35868bc169f07e40e889087fd2e", "https://github.com/jlord/runbot/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e"))
# cross repo
assert dfm(
"odoo/runbot", "jlord/sheetsee.js@a5c3785ed8d6a35868bc169f07e40e889087fd2e"
) == p(a("jlord/sheetsee.js@a5c3785ed8d6a35868bc169f07e40e889087fd2e", "https://github.com/jlord/sheetsee.js/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e"))
assert dfm(
"odoo/runbot", "https://github.com/jlord/sheetsee.js/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e"
) == p(a("jlord/sheetsee.js@a5c3785ed8d6a35868bc169f07e40e889087fd2e", "https://github.com/jlord/sheetsee.js/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e"))
def test_standalone_hash():
assert dfm(
"odoo/runbot", "a5c3785ed8d6a35868bc169f07e40e889087fd2e"
) == p(a("a5c3785ed8d6a35868bc169f07e40e889087fd2e", "https://github.com/odoo/runbot/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e"))
assert dfm(
"odoo/runbot", "a5c3785ed8d6a35868bc169f07e4"
) == p(a("a5c3785ed8d6a35868bc169f07e4", "https://github.com/odoo/runbot/commit/a5c3785ed8d6a35868bc169f07e4"))
assert dfm(
"odoo/runbot", "a5c3785"
) == p(a("a5c3785", "https://github.com/odoo/runbot/commit/a5c3785"))
assert dfm(
"odoo/runbot", "a5c378"
) == p("a5c378")
def test_ignore_tel():
assert dfm("", "[ok](https://github.com)") == p(a("ok", "https://github.com"))
assert dfm("", "[nope](tel:+1-212-555-0100)") == "<p>nope</p>"
assert dfm("", "[lol](rdar://10198949)") == "<p>lol</p>"


@ -1,9 +1,15 @@
import pytest
from utils import seen, Commit, pr_page
def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, config, users, page):
""" PRs to disabled branches are ignored, but what if the PR exists *before*
the branch is disabled?
"""
# run crons from template to clean up the queue before possibly creating
# new work
assert env['base'].run_crons()
repo = make_repo('repo')
project.branch_ids.sequence = 0
project.write({'branch_ids': [
@ -17,6 +23,7 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf
'group_id': False,
})
setreviewers(*project.repo_ids)
env['runbot_merge.events_sources'].create({'repository': repo.name})
with repo:
[m] = repo.make_commits(None, Commit('root', tree={'a': '1'}), ref='heads/master')
@ -38,10 +45,21 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf
staging_id = branch_id.active_staging_id
assert staging_id == pr_id.staging_id
# staging of `pr` should have generated a staging branch
_ = repo.get_ref('heads/staging.other')
# stagings should not need a tmp branch anymore, so this should not exist
with pytest.raises(AssertionError, match=r'Not Found'):
repo.get_ref('heads/tmp.other')
# disable branch "other"
branch_id.active = False
env.run_crons()
# triggered cleanup should have deleted the staging for the disabled `other`
# target branch
with pytest.raises(AssertionError, match=r'Not Found'):
repo.get_ref('heads/staging.other')
# the PR should not have been closed implicitly
assert pr_id.state == 'ready'
# but it should be unstaged
@ -50,20 +68,17 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf
assert not branch_id.active_staging_id
assert staging_id.state == 'cancelled', \
"closing the PRs should have canceled the staging"
assert staging_id.reason == f"Target branch deactivated by 'admin'."
assert staging_id.reason == "Target branch deactivated by 'admin'."
p = pr_page(page, pr)
target = dict(zip(
(e.text for e in p.cssselect('dl.runbot-merge-fields dt')),
(p.cssselect('dl.runbot-merge-fields dd'))
))['target']
assert target.text_content() == 'other (inactive)'
assert target.get('class') == 'text-muted bg-warning'
[target] = p.cssselect('table tr.bg-info')
assert 'inactive' in target.classes
assert target[0].text_content() == "other"
assert pr.comments == [
(users['reviewer'], "hansen r+"),
seen(env, pr, users),
(users['user'], "Hey @%(user)s @%(reviewer)s the target branch 'other' has been disabled, you may want to close this PR." % users),
(users['user'], "@%(user)s @%(reviewer)s the target branch 'other' has been disabled, you may want to close this PR." % users),
]
with repo:
@ -81,6 +96,11 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf
assert pr_id.target == env['runbot_merge.branch'].search([('name', '=', 'other2')])
assert pr_id.staging_id
# staging of `pr` should have generated a staging branch
_ = repo.get_ref('heads/staging.other2')
# stagings should not need a tmp branch anymore, so this should not exist
with pytest.raises(AssertionError, match=r'Not Found'):
repo.get_ref('heads/tmp.other2')
def test_new_pr_no_branch(env, project, make_repo, setreviewers, users):
""" A new PR to an *unknown* branch should be ignored and warn
@ -92,6 +112,7 @@ def test_new_pr_no_branch(env, project, make_repo, setreviewers, users):
'status_ids': [(0, 0, {'context': 'status'})]
})
setreviewers(*project.repo_ids)
env['runbot_merge.events_sources'].create({'repository': repo.name})
with repo:
[m] = repo.make_commits(None, Commit('root', tree={'a': '1'}), ref='heads/master')
@ -125,6 +146,7 @@ def test_new_pr_disabled_branch(env, project, make_repo, setreviewers, users):
'active': False,
})
setreviewers(*project.repo_ids)
env['runbot_merge.events_sources'].create({'repository': repo.name})
with repo:
[m] = repo.make_commits(None, Commit('root', tree={'a': '1'}), ref='heads/master')

File diff suppressed because it is too large


@ -1,3 +1,5 @@
from operator import itemgetter
import requests
from utils import Commit, to_pr, seen
@ -50,7 +52,7 @@ def test_name_search(env):
prs = PRs.create({**baseline, 'number': 1964, 'label': 'victor:thump', 'head': 'a', 'message': 'x'})\
| PRs.create({**baseline, 'number': 1959, 'label': 'marcus:frankenstein', 'head': 'b', 'message': 'y'})\
| PRs.create({**baseline, 'number': 1969, 'label': 'victor:patch-1', 'head': 'c', 'message': 'z'})
pr0, pr1, pr2 = prs.name_get()
pr0, pr1, pr2 = [[pr.id, pr.display_name] for pr in prs]
assert PRs.name_search('1964') == [pr0]
assert PRs.name_search('1969') == [pr2]
@ -96,7 +98,7 @@ def test_unreviewer(env, project, port):
assert p.review_rights == env['res.partner.review']
def test_staging_post_update(env, project, make_repo, setreviewers, users, config):
def test_staging_post_update(env, repo, users, config):
"""Because statuses come from commits, it's possible to update the commits
of a staging after that staging has completed (one way or the other), either
by sending statuses directly (e.g. rebuilding, for non-deterministic errors)
@ -105,21 +107,13 @@ def test_staging_post_update(env, project, make_repo, setreviewers, users, confi
This makes post-mortem analysis quite confusing, so stagings should
"lock in" their statuses once they complete.
"""
repo = make_repo('repo')
project.write({'repo_ids': [(0, 0, {
'name': repo.name,
'group_id': False,
'required_statuses': 'legal/cla,ci/runbot'
})]})
setreviewers(*project.repo_ids)
with repo:
[m] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), ref='heads/master')
repo.make_commits(m, Commit('thing', tree={'m': 'c'}), ref='heads/other')
pr = repo.make_pr(target='master', head='other')
repo.post_status(pr.head, 'success', 'ci/runbot')
repo.post_status(pr.head, 'success', 'legal/cla')
repo.post_status(pr.head, 'success')
pr.post_comment('hansen r+ rebase-merge', config['role_reviewer']['token'])
env.run_crons()
pr_id = to_pr(env, pr)
@ -128,18 +122,244 @@ def test_staging_post_update(env, project, make_repo, setreviewers, users, confi
staging_head = repo.commit('staging.master')
with repo:
repo.post_status(staging_head, 'failure', 'ci/runbot')
repo.post_status(staging_head, 'failure')
env.run_crons()
assert pr_id.state == 'error'
assert staging_id.state == 'failure'
assert staging_id.statuses == [
[repo.name, 'ci/runbot', 'failure', ''],
[repo.name, 'default', 'failure', ''],
]
with repo:
repo.post_status(staging_head, 'success', 'ci/runbot')
repo.post_status(staging_head, 'success')
env.run_crons()
assert staging_id.state == 'failure'
assert staging_id.statuses == [
[repo.name, 'ci/runbot', 'failure', ''],
[repo.name, 'default', 'failure', ''],
]
def test_merge_empty_commits(env, repo, users, config):
"""The mergebot should allow merging already-empty commits.
"""
with repo:
[m] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), ref='heads/master')
repo.make_commits(m, Commit('thing1', tree={}), ref='heads/other1')
pr1 = repo.make_pr(target='master', head='other1')
repo.post_status(pr1.head, 'success')
pr1.post_comment('hansen r+', config['role_reviewer']['token'])
repo.make_commits(m, Commit('thing2', tree={}), ref='heads/other2')
pr2 = repo.make_pr(target='master', head='other2')
repo.post_status(pr2.head, 'success')
pr2.post_comment('hansen r+ rebase-ff', config['role_reviewer']['token'])
env.run_crons()
pr1_id = to_pr(env, pr1)
pr2_id = to_pr(env, pr2)
assert pr1_id.staging_id and pr2_id.staging_id
with repo:
repo.post_status('staging.master', 'success')
env.run_crons()
assert pr1_id.state == pr2_id.state == 'merged'
# log is most-recent-first (?)
commits = list(repo.log('master'))
head = repo.commit(commits[0]['sha'])
assert repo.read_tree(head) == {'m': 'm'}
assert commits[0]['commit']['message'].startswith('thing2')
assert commits[1]['commit']['message'].startswith('thing1')
assert commits[2]['commit']['message'] == 'initial'
def test_merge_emptying_commits(env, repo, users, config):
"""The mergebot should *not* allow merging non-empty commits which become
empty as part of the staging (rebasing)
"""
with repo:
[m, _] = repo.make_commits(
None,
Commit('initial', tree={'m': 'm'}),
Commit('second', tree={'m': 'c'}),
ref='heads/master',
)
[c1] = repo.make_commits(m, Commit('thing', tree={'m': 'c'}), ref='heads/branch1')
pr1 = repo.make_pr(target='master', head='branch1')
repo.post_status(pr1.head, 'success')
pr1.post_comment('hansen r+ rebase-ff', config['role_reviewer']['token'])
[_, c2] = repo.make_commits(
m,
Commit('thing1', tree={'c': 'c'}),
Commit('thing2', tree={'m': 'c'}),
ref='heads/branch2',
)
pr2 = repo.make_pr(target='master', head='branch2')
repo.post_status(pr2.head, 'success')
pr2.post_comment('hansen r+ rebase-ff', config['role_reviewer']['token'])
repo.make_commits(
m,
Commit('thing1', tree={'m': 'x'}),
Commit('thing2', tree={'m': 'c'}),
ref='heads/branch3',
)
pr3 = repo.make_pr(target='master', head='branch3')
repo.post_status(pr3.head, 'success')
pr3.post_comment('hansen r+ squash', config['role_reviewer']['token'])
env.run_crons()
ping = f"@{users['user']} @{users['reviewer']}"
# check that first / sole commit emptying is caught
pr1_id = to_pr(env, pr1)
assert not pr1_id.staging_id
assert pr1.comments[3:] == [
(users['user'], f"{ping} unable to stage: commit {c1} results in an empty tree when merged, it is likely a duplicate of a merged commit, rebase and remove.")
]
assert pr1_id.error
assert pr1_id.state == 'error'
# check that followup commit emptying is caught
pr2_id = to_pr(env, pr2)
assert not pr2_id.staging_id
assert pr2.comments[3:] == [
(users['user'], f"{ping} unable to stage: commit {c2} results in an empty tree when merged, it is likely a duplicate of a merged commit, rebase and remove.")
]
assert pr2_id.error
assert pr2_id.state == 'error'
# check that emptied squashed pr is caught
pr3_id = to_pr(env, pr3)
assert not pr3_id.staging_id
assert pr3.comments[3:] == [
(users['user'], f"{ping} unable to stage: results in an empty tree when merged, might be the duplicate of a merged PR.")
]
assert pr3_id.error
assert pr3_id.state == 'error'
# ensure the PR does not get re-staged since it's the first of the staging
# (it's the only one)
env.run_crons()
assert pr1.comments[3:] == [
(users['user'], f"{ping} unable to stage: commit {c1} results in an empty tree when merged, it is likely a duplicate of a merged commit, rebase and remove.")
]
assert len(pr2.comments) == 4
assert len(pr3.comments) == 4
def test_force_ready(env, repo, config):
with repo:
[m] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), ref="heads/master")
repo.make_commits(m, Commit('first', tree={'m': 'c1'}), ref="heads/other")
pr = repo.make_pr(target='master', head='other')
env.run_crons()
pr_id = to_pr(env, pr)
pr_id.skipchecks = True
assert pr_id.state == 'ready'
assert pr_id.status == 'pending'
reviewer = env['res.users'].browse([env._uid]).partner_id
assert pr_id.reviewed_by == reviewer
def test_help(env, repo, config, users, partners):
with repo:
[m] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), ref="heads/master")
repo.make_commits(m, Commit('first', tree={'m': 'c1'}), ref="heads/other")
pr = repo.make_pr(target='master', head='other')
env.run_crons()
for role in ['reviewer', 'self_reviewer', 'user', 'other']:
v = config[f'role_{role}']
with repo:
pr.post_comment("hansen help", v['token'])
with repo:
pr.post_comment("hansen r+ help", config['role_reviewer']['token'])
assert not partners['reviewer'].user_ids, "the reviewer should not be an internal user"
group_internal = env.ref("base.group_user")
group_admin = env.ref("runbot_merge.group_admin")
env['res.users'].create({
'partner_id': partners['reviewer'].id,
'login': 'reviewer',
'groups_id': [(4, group_internal.id, 0), (4, group_admin.id, 0)],
})
with repo:
pr.post_comment("hansen help", config['role_reviewer']['token'])
env.run_crons()
assert pr.comments == [
seen(env, pr, users),
(users['reviewer'], "hansen help"),
(users['self_reviewer'], "hansen help"),
(users['user'], "hansen help"),
(users['other'], "hansen help"),
(users['reviewer'], "hansen r+ help"),
(users['reviewer'], "hansen help"),
(users['user'], REVIEWER.format(user=users['reviewer'], skip="")),
(users['user'], RANDO.format(user=users['self_reviewer'])),
(users['user'], AUTHOR.format(user=users['user'])),
(users['user'], RANDO.format(user=users['other'])),
(users['user'],
REVIEWER.format(user=users['reviewer'], skip='')
+ "\n\nWarning: in invoking help, every other command has been ignored."),
(users['user'], REVIEWER.format(
user=users['reviewer'],
skip='|`skipchecks`|bypasses both statuses and review|\n',
)),
]
REVIEWER = """\
Currently available commands for @{user}:
|command||
|-|-|
|`help`|displays this help|
|`r(eview)+`|approves the PR, if it's a forwardport also approves all non-detached parents|
|`r(eview)=<number>`|only approves the specified parents|
|`fw=no`|does not forward-port this PR|
|`fw=default`|forward-ports this PR normally|
|`fw=skipci`|does not wait for a forward-port's statuses to succeed before creating the next one|
|`up to <branch>`|only ports this PR forward to the specified branch (included)|
|`merge`|integrate the PR with a simple merge commit, using the PR description as message|
|`rebase-merge`|rebases the PR on top of the target branch then integrates with a merge commit, using the PR description as message|
|`rebase-ff`|rebases the PR on top of the target branch, then fast-forwards|
|`squash`|squashes the PR as a single commit on the target branch, using the PR description as message|
|`delegate+`|grants approval rights to the PR author|
|`delegate=<...>`|grants approval rights on this PR to the specified github users|
|`default`|stages the PR normally|
|`priority`|tries to stage this PR first, then adds `default` PRs if the staging has room|
|`alone`|stages this PR only with other PRs of the same priority|
{skip}\
|`cancel=staging`|automatically cancels the current staging when this PR becomes ready|
|`check`|fetches or refreshes PR metadata, resets mergebot state|
Note: this help text is dynamic and will change with the state of the PR.\
"""
AUTHOR = """\
Currently available commands for @{user}:
|command||
|-|-|
|`help`|displays this help|
|`fw=no`|does not forward-port this PR|
|`up to <branch>`|only ports this PR forward to the specified branch (included)|
|`check`|fetches or refreshes PR metadata, resets mergebot state|
Note: this help text is dynamic and will change with the state of the PR.\
"""
RANDO = """\
Currently available commands for @{user}:
|command||
|-|-|
|`help`|displays this help|
Note: this help text is dynamic and will change with the state of the PR.\
"""


@ -0,0 +1,128 @@
import datetime
import functools
from itertools import repeat
import pytest
from utils import Commit, to_pr, ensure_one
def test_disable_staging(env, project, repo, config):
"""In order to avoid issues of cron locking, as well as not disable staging
for every project when trying to freeze just one of them (cough cough), a
toggle is available on the project to skip staging for it.
"""
with repo:
[m] = repo.make_commits(None, Commit("m", tree={"a": "1"}), ref="heads/master")
[c] = repo.make_commits(m, Commit("c", tree={"a": "2"}), ref="heads/other")
pr = repo.make_pr(title="whatever", target="master", head="other")
pr.post_comment("hansen r+", config["role_reviewer"]['token'])
repo.post_status(c, "success")
env.run_crons()
pr_id = to_pr(env, pr)
staging_1 = pr_id.staging_id
assert staging_1.active
project.staging_enabled = False
staging_1.cancel("because")
env.run_crons()
assert staging_1.active is False
assert staging_1.state == "cancelled"
assert not pr_id.staging_id.active,\
"should not be re-staged, because staging has been disabled"
@pytest.mark.parametrize('mode,cutoff,second', [
# default mode, the second staging is the first half of the first staging
('default', 2, [0]),
# splits are right-biased (the midpoint is rounded down), so for odd
# staging sizes the first split is the smaller one
('default', 3, [0]),
# if the split results in ((1, 2), 1), largest stages the second
('largest', 3, [1, 2]),
# if the split results in ((1, 1), 2), largest stages the ready PRs
('largest', 2, [2, 3]),
# even if it's a small minority, ready selects the ready PR(s)
('ready', 3, [3]),
('ready', 2, [2, 3]),
])
def test_staging_priority(env, project, repo, config, mode, cutoff, second):
"""By default, unless a PR is prioritised as "alone" splits take priority
over new stagings.
*However* to try and maximise throughput in trying times, it's possible to
configure the project to prioritise either the largest staging (between spit
and ready batches), or to just prioritise new stagings.
"""
def select(prs, indices):
zero = env['runbot_merge.pull_requests']
filtered = (p for i, p in enumerate(prs) if i in indices)
return functools.reduce(lambda a, b: a | b, filtered, zero)
project.staging_priority = mode
# we need at least 3 PRs, two that we can split out, and one leftover
with repo:
[m] = repo.make_commits(None, Commit("m", tree={"ble": "1"}), ref="heads/master")
repo.make_commits(m, Commit("c", tree={"1": "1"}), ref="heads/pr1")
pr1 = repo.make_pr(title="whatever", target="master", head="pr1")
repo.make_commits(m, Commit("c", tree={"2": "2"}), ref="heads/pr2")
pr2 = repo.make_pr(title="whatever", target="master", head="pr2")
repo.make_commits(m, Commit("c", tree={"3": "3"}), ref="heads/pr3")
pr3 = repo.make_pr(title="whatever", target="master", head="pr3")
repo.make_commits(m, Commit("c", tree={"4": "4"}), ref="heads/pr4")
pr4 = repo.make_pr(title="whatever", target="master", head="pr4")
prs = [pr1, pr2, pr3, pr4]
pr_ids = functools.reduce(
lambda a, b: a | b,
map(to_pr, repeat(env), prs)
)
# ready the PRs for the initial staging (to split)
pre_cutoff = pr_ids[:cutoff]
with repo:
for pr, pr_id in zip(prs[:cutoff], pre_cutoff):
pr.post_comment('hansen r+', config['role_reviewer']['token'])
repo.post_status(pr_id.head, 'success')
env.run_crons()
# check they staged as expected
assert all(p.staging_id for p in pre_cutoff)
staging = ensure_one(env['runbot_merge.stagings'].search([]))
ensure_one(pre_cutoff.staging_id)
# ready the rest
with repo:
for pr, pr_id in zip(prs[cutoff:], pr_ids[cutoff:]):
pr.post_comment('hansen r+', config['role_reviewer']['token'])
repo.post_status(pr_id.head, 'success')
env.run_crons(None)
assert not pr_ids.filtered(lambda p: p.blocked)
# trigger a split
with repo:
repo.post_status('staging.master', 'failure')
# specifically delay creation of new staging to observe the failed
# staging's state and the splits
model, cron_id = env['ir.model.data'].check_object_reference('runbot_merge', 'staging_cron')
staging_cron = env[model].browse([cron_id])
staging_cron.active = False
env.run_crons(None)
assert not staging.active
assert not env['runbot_merge.stagings'].search([]).active
assert env['runbot_merge.split'].search_count([]) == 2
staging_cron.active = True
# manually trigger that cron, as having the cron disabled prevented the creation of the triggers entirely
env.run_crons('runbot_merge.staging_cron')
# check that st.pr_ids are the PRs we expect
st = env['runbot_merge.stagings'].search([])
assert st.pr_ids == select(pr_ids, second)
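# Split arithmetic assumed by the parametrization above (a sketch, not
# mergebot code): the midpoint is rounded down, so for odd staging sizes the
# first half is the smaller one.
def _split(prs):
    mid = len(prs) // 2
    return prs[:mid], prs[mid:]

assert _split([1, 2, 3]) == ([1], [2, 3])  # right-biased: ((1), (2, 3))
assert _split([1, 2, 3, 4]) == ([1, 2], [3, 4])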

View File

@ -1,4 +1,3 @@
import pytest
import requests
GEORGE = {
@ -15,10 +14,8 @@ def test_basic_provisioning(env, port):
assert g.partner_id.name == GEORGE['name']
assert g.partner_id.github_login == GEORGE['github_login']
assert g.oauth_uid == GEORGE['sub']
(model, g_id) = env['ir.model.data']\
.check_object_reference('base', 'group_user')
assert model == 'res.groups'
assert g.groups_id.id == g_id, "check that users were provisioned as internal (not portal)"
internal = env.ref('base.group_user')
assert (g.groups_id & internal) == internal, "check that users were provisioned as internal (not portal)"
# repeated provisioning should be a no-op
r = provision_user(port, [GEORGE])
@ -32,24 +29,13 @@ def test_basic_provisioning(env, port):
r = provision_user(port, [dict(GEORGE, name="x", github_login="y", sub="42")])
assert r == [0, 1]
# can't fail anymore because github_login is now used to look up the
# existing user
# with pytest.raises(Exception):
# provision_user(port, [{
# 'name': "other@example.org",
# 'email': "x",
# 'github_login': "y",
# 'sub': "42"
# }])
r = provision_user(port, [dict(GEORGE, active=False)])
assert r == [0, 1]
assert not env['res.users'].search([('login', '=', GEORGE['email'])])
assert env['res.partner'].search([('email', '=', GEORGE['email'])])
def test_upgrade_partner(env, port):
# If a partner exists for a github login (and / or email?) it can be
# upgraded by creating a user for it
# matching partner with an email but no github login
p = env['res.partner'].create({
'name': GEORGE['name'],
'email': GEORGE['email'],
@ -66,6 +52,7 @@ def test_upgrade_partner(env, port):
p.user_ids.unlink()
p.unlink()
# matching partner with a github login but no email
p = env['res.partner'].create({
'name': GEORGE['name'],
'github_login': GEORGE['github_login'],
@ -79,8 +66,47 @@ def test_upgrade_partner(env, port):
'email': GEORGE['email'],
}]
p.user_ids.unlink()
p.unlink()
# matching partner with a deactivated user
p.user_ids.active = False
r = provision_user(port, [GEORGE])
assert r == [0, 1]
assert len(p.user_ids) == 1, "provisioning should re-enable user"
assert p.user_ids.active
# matching deactivated partner (with a deactivated user)
p.user_ids.active = False
p.active = False
r = provision_user(port, [GEORGE])
assert r == [0, 1]
assert p.active, "provisioning should re-enable partner"
assert p.user_ids.active
def test_duplicates(env, port):
"""In case of duplicate data, the handler should probably not blow up, but
instead log a warning (so the data gets fixed eventually) and skip
"""
# dupe 1: old oauth signup account & github interaction account, provisioning
# prioritises the github account & tries to create a user for it, which
# fails because the signup account has the same oauth uid (probably)
env['res.partner'].create({'name': 'foo', 'github_login': 'foo'})
env['res.users'].create({'login': 'foo@example.com', 'name': 'foo', 'email': 'foo@example.com', 'oauth_provider_id': 1, 'oauth_uid': '42'})
assert provision_user(port, [{
'name': "foo",
'email': 'foo@example.com',
'github_login': 'foo',
'sub': '42'
}]) == [0, 0]
# dupe 2: old non-oauth signup account & github interaction account, same
# as previous except it breaks on the login instead of the oauth_uid
env['res.partner'].create({'name': 'bar', 'github_login': 'bar'})
env['res.users'].create({'login': 'bar@example.com', 'name': 'bar', 'email': 'bar@example.com'})
assert provision_user(port, [{
'name': "bar",
'email': 'bar@example.com',
'github_login': 'bar',
'sub': '43'
}]) == [0, 0]
def test_no_email(env, port):
""" Provisioning system should ignore email-less entries
@ -88,6 +114,81 @@ def test_no_email(env, port):
r = provision_user(port, [{**GEORGE, 'email': None}])
assert r == [0, 0]
def test_casing(env, port):
p = env['res.partner'].create({
'name': 'Bob',
'github_login': "Bob",
})
assert not p.user_ids
assert provision_user(port, [{
'name': "Bob Thebuilder",
'github_login': "bob",
'email': 'bob@example.org',
'sub': '5473634',
}]) == [1, 0]
assert p.user_ids.name == 'Bob Thebuilder'
assert p.user_ids.email == 'bob@example.org'
assert p.user_ids.oauth_uid == '5473634'
# should be written on the partner through the user
assert p.name == 'Bob Thebuilder'
assert p.email == 'bob@example.org'
assert p.github_login == 'bob'
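# note: the partner was created with github_login "Bob" but matched by the
# entry's "bob", so the lookup is evidently case-insensitive and the login
# gets normalised to the provisioned casing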
def test_user_leaves_and_returns(env, port):
internal = env.ref('base.group_user')
portal = env.ref('base.group_portal')
categories = internal | portal | env.ref('base.group_public')
assert provision_user(port, [{
"name": "Bamien Douvy",
"github_login": "DouvyB",
"email": "bado@example.org",
"sub": "123456",
}]) == [1, 0]
p = env['res.partner'].search([('github_login', '=', "DouvyB")])
assert (p.user_ids.groups_id & categories) == internal
# bye bye 👋
requests.post(f'http://localhost:{port}/runbot_merge/remove_reviewers', json={
'jsonrpc': '2.0',
'id': None,
'method': 'call',
'params': {'github_logins': ['douvyb']},
})
assert (p.user_ids.groups_id & categories) == portal
assert p.email is False
# he's back ❤️
assert provision_user(port, [{
"name": "Bamien Douvy",
"github_login": "DouvyB",
"email": "bado@example.org",
"sub": "123456",
}]) == [0, 1]
assert (p.user_ids.groups_id & categories) == internal
assert p.email == 'bado@example.org'
def test_bulk_ops(env, port):
a, b = env['res.partner'].create([{
'name': "Bob",
'email': "bob@example.org",
'active': False,
}, {
'name': "Coc",
'email': "coc@example.org",
'active': False,
}])
assert a.active is b.active is False
assert provision_user(port, [
{'email': 'bob@example.org', 'github_login': 'xyz'},
{'email': 'coc@example.org', 'github_login': 'abc'},
]) == [2, 0]
assert a.user_ids
assert b.user_ids
assert a.active is b.active is True
def provision_user(port, users):
r = requests.post(f'http://localhost:{port}/runbot_merge/provision', json={
'jsonrpc': '2.0',
@ -97,6 +198,6 @@ def provision_user(port, users):
})
r.raise_for_status()
json = r.json()
assert 'error' not in json
assert 'error' not in json, json['error']['data']['debug']
return json['result']
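# the result reads as a pair of counters, presumably (created, updated):
# [1, 0] when a user is created, [0, 1] when an existing user or partner is
# updated or re-enabled, [0, 0] when the entry is skipped (no email,
# duplicate data)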

View File

@ -0,0 +1,28 @@
from utils import Commit, to_pr
def test_staging_disabled_branch(env, project, repo, config):
"""Check that it's possible to disable staging on a specific branch
"""
project.branch_ids = [(0, 0, {
'name': 'other',
'staging_enabled': False,
})]
with repo:
[master_commit] = repo.make_commits(None, Commit("master", tree={'a': '1'}), ref="heads/master")
[c1] = repo.make_commits(master_commit, Commit("thing", tree={'a': '2'}), ref='heads/master-thing')
master_pr = repo.make_pr(title="whatever", target="master", head="master-thing")
master_pr.post_comment("hansen r+", config['role_reviewer']['token'])
repo.post_status(c1, 'success')
[other_commit] = repo.make_commits(None, Commit("other", tree={'b': '1'}), ref='heads/other')
[c2] = repo.make_commits(other_commit, Commit("thing", tree={'b': '2'}), ref='heads/other-thing')
other_pr = repo.make_pr(title="whatever", target="other", head="other-thing")
other_pr.post_comment("hansen r+", config['role_reviewer']['token'])
repo.post_status(c2, 'success')
env.run_crons()
assert to_pr(env, master_pr).staging_id, \
"master is allowed to stage, should be staged"
assert not to_pr(env, other_pr).staging_id, \
"other is *not* allowed to stage, should not be staged"

View File

@ -50,6 +50,7 @@ def test_basic(env, project, make_repo, users, setreviewers, config):
'status_ids': [(0, 0, {'context': 'l/int'})]
})
setreviewers(*project.repo_ids)
env['runbot_merge.events_sources'].create({'repository': repo.name})
# "other" can override the lint
env['res.partner'].create({
'name': config['role_other'].get('name', 'Other'),
@ -89,7 +90,7 @@ def test_basic(env, project, make_repo, users, setreviewers, config):
(users['reviewer'], 'hansen r+'),
seen(env, pr, users),
(users['reviewer'], 'hansen override=l/int'),
(users['user'], "I'm sorry, @{}: you are not allowed to override this status.".format(users['reviewer'])),
(users['user'], "@{} you are not allowed to override 'l/int'.".format(users['reviewer'])),
(users['other'], "hansen override=l/int"),
]
assert pr_id.statuses == '{}'
@ -110,6 +111,7 @@ def test_multiple(env, project, make_repo, users, setreviewers, config):
'status_ids': [(0, 0, {'context': 'l/int'}), (0, 0, {'context': 'c/i'})]
})
setreviewers(*project.repo_ids)
env['runbot_merge.events_sources'].create({'repository': repo.name})
# "other" can override the lints
env['res.partner'].create({
'name': config['role_other'].get('name', 'Other'),
@ -174,6 +176,7 @@ def test_no_repository(env, project, make_repo, users, setreviewers, config):
'status_ids': [(0, 0, {'context': 'l/int'})]
})
setreviewers(*project.repo_ids)
env['runbot_merge.events_sources'].create({'repository': repo.name})
# "other" can override the lint
env['res.partner'].create({
'name': config['role_other'].get('name', 'Other'),

View File

@ -3,7 +3,7 @@ import itertools
import time
def shorten(text_ish, length):
def shorten(text_ish, length, cont='...'):
""" If necessary, cuts-off the text or bytes input and appends ellipsis to
signal the cutoff, such that the result is below the provided length
(according to whatever "len" means on the text-ish so bytes or codepoints
@ -12,11 +12,10 @@ def shorten(text_ish, length):
if len(text_ish or ()) <= length:
return text_ish
cont = '...'
if isinstance(text_ish, bytes):
cont = cont.encode('ascii') # whatever
# add enough room for the ellipsis
return text_ish[:length-3] + cont
return text_ish[:length-len(cont)] + cont
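# Usage sketch for the updated signature: the continuation marker's own
# length is now accounted for, so the result always fits within `length`:
#   shorten("hello world", 8)            # -> "hello..."
#   shorten(b"hello world", 8)           # -> b"hello..."
#   shorten("hello world", 8, cont="~")  # -> "hello w~"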
BACKOFF_DELAYS = (0.1, 0.2, 0.4, 0.8, 1.6)
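# (function body elided by the hunk) judging from the signature alone,
# `backoff` presumably works both bare and parameterised, e.g. `@backoff` or
# `@backoff(delays=(0.1, 0.2), exc=IOError)`, retrying the wrapped callable
# with the delays above; this reading is an assumption, not confirmed by the
# diff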
def backoff(func=None, *, delays=BACKOFF_DELAYS, exc=Exception):

View File

@ -0,0 +1,98 @@
<odoo>
<record id="runbot_merge_action_batches" model="ir.actions.act_window">
<field name="name">Batches</field>
<field name="res_model">runbot_merge.batch</field>
<field name="view_mode">tree,form</field>
</record>
<record id="runbot_merge_batch_search" model="ir.ui.view">
<field name="name">batches search</field>
<field name="model">runbot_merge.batch</field>
<field name="arch" type="xml">
<search>
<filter name="all" domain="['|', ('active', '=', True), ('active', '=', False)]"/>
<filter name="inactive" domain="[('active', '=', False)]"/>
<field name="name"/>
<field name="target"/>
<field name="id"/>
</search>
</field>
</record>
<record id="runbot_merge_batch_tree" model="ir.ui.view">
<field name="name">batches list</field>
<field name="model">runbot_merge.batch</field>
<field name="arch" type="xml">
<tree decoration-muted="not active">
<field name="id"/>
<field name="name"/>
<field name="target"/>
<field name="prs" widget="many2many_tags"/>
<field name="blocked"/>
<field name="active" invisible="1"/>
</tree>
</field>
</record>
<record id="runbot_merge_batch_form" model="ir.ui.view">
<field name="name">Batch form</field>
<field name="model">runbot_merge.batch</field>
<field name="arch" type="xml">
<form>
<sheet>
<div class="oe_title"><h1><field name="name"/></h1></div>
<group>
<group>
<field name="target"/>
<field name="merge_date"/>
<field name="priority" invisible="merge_date"/>
<field name="skipchecks" invisible="merge_date"/>
<field name="cancel_staging" invisible="merge_date"/>
<field name="fw_policy"/>
</group>
<group>
<field name="blocked"/>
</group>
</group>
<group string="Pull Requests">
<group colspan="4">
<field name="all_prs" nolabel="1" readonly="1">
<tree>
<button type="object" name="get_formview_action" icon="fa-external-link" title="open PR"/>
<field name="display_name"/>
<field name="repository"/>
<field name="state"/>
</tree>
</field>
</group>
</group>
<group string="Genealogy">
<group colspan="4">
<field name="genealogy_ids" nolabel="1" readonly="1">
<tree decoration-muted="id == parent.id">
<button type="object" name="get_formview_action" icon="fa-external-link" title="open batch"/>
<field name="name"/>
<field name="target"/>
<field name="all_prs" widget="many2many_tags"/>
</tree>
</field>
</group>
</group>
<group string="Stagings">
<group colspan="4">
<field name="staging_ids" nolabel="1" readonly="1">
<tree>
<button type="object" name="get_formview_action" icon="fa-external-link" title="open staging"/>
<field name="staged_at"/>
<field name="state"/>
<field name="reason"/>
</tree>
</field>
</group>
</group>
</sheet>
</form>
</field>
</record>
</odoo>

View File

@ -33,11 +33,59 @@
</field>
</record>
<menuitem name="Configuration" id="menu_configuration" parent="runbot_merge_menu"/>
<record id="action_feedback" model="ir.actions.act_window">
<field name="name">Feedback Templates tree</field>
<field name="res_model">runbot_merge.pull_requests.feedback.template</field>
</record>
<record id="tree_feedback" model="ir.ui.view">
<field name="name">Feedback Templates</field>
<field name="model">runbot_merge.pull_requests.feedback.template</field>
<field name="arch" type="xml">
<tree>
<field name="template"/>
<field name="help"/>
</tree>
</field>
</record>
<record id="form_feedback" model="ir.ui.view">
<field name="name">Feedback Templates form</field>
<field name="model">runbot_merge.pull_requests.feedback.template</field>
<field name="arch" type="xml">
<form>
<sheet>
<field name="help"/>
<field name="template"/>
</sheet>
<div class="oe_chatter">
<field name="message_ids"/>
</div>
</form>
</field>
</record>
<record id="action_events_sources" model="ir.actions.act_window">
<field name="name">Events Sources</field>
<field name="res_model">runbot_merge.events_sources</field>
</record>
<record id="tree_events_sources" model="ir.ui.view">
<field name="name">Events Sources List</field>
<field name="model">runbot_merge.events_sources</field>
<field name="arch" type="xml">
<tree editable="bottom">
<field name="repository"/>
<field name="secret"/>
</tree>
</field>
</record>
<menuitem name="Configuration" id="menu_configuration" parent="runbot_merge_menu">
<menuitem name="CI Overrides" id="menu_configuration_overrides"
parent="menu_configuration"
action="action_overrides"/>
<menuitem name="Review Rights" id="menu_configuration_review"
parent="menu_configuration"
action="action_review"/>
<menuitem name="Feedback Templates" id="menu_configuration_feedback"
action="action_feedback"/>
<menuitem name="Events Sources" id="menu_configuration_events_sources"
action="action_events_sources"/>
</menuitem>
</odoo>

View File

@ -20,6 +20,7 @@
<separator string="Required Statuses"/>
<field name="status_ids">
<tree editable="bottom">
<button type="object" name="get_formview_action" icon="fa-external-link" title="open status"/>
<field name="context"/>
<field name="branch_filter"/>
<field name="prs"/>
@ -30,6 +31,41 @@
</form>
</field>
</record>
<record id="runbot_merge_branch_form" model="ir.ui.view">
<field name="name">Branch Form</field>
<field name="model">runbot_merge.branch</field>
<field name="arch" type="xml">
<form>
<sheet>
<div class="oe_title">
<h1><field name="name"/></h1>
</div>
<group>
<group>
<field name="project_id" readonly="1"/>
<field name="sequence" readonly="1"/>
</group>
<group>
<field name="active"/>
<field name="staging_enabled"/>
</group>
</group>
<separator string="Stagings"/>
<group>
<field name="active_staging_id"/>
</group>
<field name="staging_ids" nolabel="1" readonly="1">
<tree default_order="staged_at desc">
<button type="object" name="get_formview_action" icon="fa-external-link" title="open staging"/>
<field name="id"/>
<field name="staged_at"/>
<field name="state"/>
</tree>
</field>
</sheet>
</form>
</field>
</record>
<record id="runbot_merge_action_projects" model="ir.actions.act_window">
<field name="name">Projects</field>
@ -52,9 +88,9 @@
name="open" string="Open"
domain="[('state', 'not in', ['merged', 'closed'])]"
/>
<field name="label"/>
<field name="number"/>
<field name="author"/>
<field name="label"/>
<field name="target"/>
<field name="repository"/>
<field name="state"/>
@ -87,50 +123,156 @@
<field name="model">runbot_merge.pull_requests</field>
<field name="arch" type="xml">
<form>
<header/>
<div class="o_form_statusbar">
<span class="o_statusbar_buttons">
<button type="object" name="button_split" string="Split Off"/>
<field name="github_url" widget="url" class="btn btn-secondary" text="Github"/>
<field name="url" widget="url" class="btn btn-secondary" text="Frontend"/>
</span>
</div>
<sheet>
<field name="project" invisible="1"/>
<field name="target_sequence" invisible="1"/>
<div class="oe_title">
<h1>
<field name="repository"/>#<field name="number"/>
</h1>
<h2>
<field name="state"/>
<span invisible="state == 'merged' or not blocked">
(blocked: <field name="blocked"/>)
</span>
<span invisible="state != 'merged'">
(<field name="merge_date"/>)
</span>
</h2>
</div>
<group>
<!-- main PR metadata -->
<group name="metadata">
<group>
<field name="batch_id"/>
<field name="target"/>
<field name="state"/>
<field name="author"/>
</group>
<group>
<field name="label"/>
<field name="priority"/>
<field name="squash"/>
</group>
</group>
<group>
<group colspan="4">
<field name="author"/>
<field name="head"/>
<field name="statuses"/>
</group>
<group colspan="4">
<field name="overrides"/>
</group>
</group>
<group>
<group colspan="4" string="Message">
<notebook>
<page name="state" string="State">
<group>
<group>
<field name="reviewed_by"/>
<field name="closed"/>
<field name="error"/>
</group>
<group>
<field name="status"/>
<details colspan="4">
<summary>Commit Statuses</summary>
<field name="statuses"/>
</details>
<details colspan="4">
<summary>Overrides</summary>
<field name="overrides"/>
</details>
</group>
</group>
<group>
<group colspan="4">
<field name="blocked"/>
</group>
</group>
</page>
<page name="configuration" string="Configuration">
<group>
<group>
<field name="merge_method"/>
<field name="squash"/>
<field name="draft"/>
</group>
<group>
<field name="priority"/>
<field name="skipchecks" widget="boolean_toggle"/>
<field name="cancel_staging" widget="boolean_toggle"/>
</group>
</group>
<group string="Delegates">
<group colspan="4">
<field name="delegates" nolabel="1">
<tree>
<button type="object" name="get_formview_action" icon="fa-external-link" title="open partner"/>
<field name="name"/>
<field name="github_login"/>
</tree>
</field>
</group>
</group>
</page>
<page name="stagings" string="Staging History">
<group>
<group colspan="4">
<field name="staging_ids" nolabel="1" readonly="1">
<tree>
<button type="object" name="get_formview_action" icon="fa-external-link" title="open staging"/>
<field name="staged_at"/>
<field name="state"/>
<field name="reason"/>
</tree>
</field>
</group>
</group>
</page>
<page name="porting" string="Forward-Porting">
<group>
<group>
<field name="limit_id" domain="[('project_id', '=', project), ('sequence', '&lt;=', target_sequence)]"/>
<field string="Original PR" name="source_id"/>
<field name="parent_id"/>
<field
invisible="source_id and not parent_id"
string="Detached because" name="detach_reason" readonly="1"/>
</group>
</group>
<group>
<group colspan="4">
<field name="forwardport_ids" nolabel="1" readonly="True">
<tree>
<button type="object" name="get_formview_action" icon="fa-external-link" title="open PR"/>
<field name="target" string="Branch"/>
<field name="number"/>
</tree>
</field>
</group>
</group>
</page>
</notebook>
<!-- influencers -->
<group string="Message">
<group colspan="4">
<field name="message" nolabel="1"/>
</group>
</group>
<group>
<group colspan="4" string="Delegates">
<field name="delegates" nolabel="1">
<tree>
<field name="name"/>
<field name="github_login"/>
</tree>
</field>
</group>
</group>
</sheet>
<div class="oe_chatter">
<field name="message_follower_ids" widget="mail_followers"/>
<field name="message_ids" widget="mail_thread"/>
</div>
</form>
</field>
</record>
<record id="runbot_merge_pull_requests_split_off_form" model="ir.ui.view">
<field name="name">Split Off Form</field>
<field name="model">runbot_merge.pull_requests.split_off</field>
<field name="arch" type="xml">
<form>
<field name="new_label" colspan="4"/>
<footer>
<button type="object" name="button_apply" string="Apply" class="btn btn-primary"/>
<button special="cancel" string="Cancel" class="btn btn-secondary"/>
</footer>
</form>
</field>
</record>
@ -175,7 +317,7 @@
<field name="active" invisible="1"/>
<header>
<button type="object" name="action_cancel" string="Cancel" class="oe_highlight"
attrs="{'invisible': [('active', '=', False)]}"
invisible="not active"
/>
</header>
<sheet>
@ -187,21 +329,44 @@
</group>
<group>
<field name="staged_at"/>
<field string="Staging Duration (seconds)"
name="staging_duration" widget="integer"/>
</group>
</group>
<group string="Heads">
<field name="head_ids" colspan="4" nolabel="1">
<group>
<group string="Heads">
<field name="head_ids" colspan="2" nolabel="1" readonly="1">
<tree>
<button type="object" name="get_formview_action" icon="fa-external-link" title="open head"/>
<field name="sha"/>
</tree>
</field>
</group>
<group string="Commits">
<field name="commit_ids" colspan="2" nolabel="1" readonly="1">
<tree>
<button type="object" name="get_formview_action" icon="fa-external-link" title="open commit"/>
<field name="sha"/>
</tree>
</field>
</group>
</group>
<group string="Batches">
<field name="batch_ids" colspan="4" nolabel="1" readonly="1">
<tree>
<field name="sha"/>
<field name="statuses"/>
<button type="object" name="get_formview_action" icon="fa-external-link" title="open batch"/>
<field name="name"/>
<field name="prs" widget="many2many_tags"/>
</tree>
</field>
</group>
<group string="Batches">
<field name="batch_ids" colspan="4" nolabel="1">
<group string="PRs">
<field name="pr_ids" colspan="4" nolabel="1" readonly="1">
<tree>
<field name="prs" widget="many2many_tags"
options="{'no_quick_create': True}"/>
<button type="object" name="get_formview_action" icon="fa-external-link" title="open pr"/>
<field name="display_name"/>
<field name="github_url" widget="url"/>
<field name="url" widget="url"/>
</tree>
</field>
</group>
@ -221,22 +386,62 @@
<field name="arch" type="xml">
<tree>
<field name="sha"/>
<field name="statuses"/>
</tree>
</field>
</record>
<record id="runbot_merge_commits_form" model="ir.ui.view">
<field name="name">commits form</field>
<field name="model">runbot_merge.commit</field>
<field name="arch" type="xml">
<form>
<sheet>
<div class="oe_title">
<h1><field name="sha"/></h1>
</div>
<field name="statuses" widget="json"/>
<separator string="Pull Requests"/>
<field name="pull_requests" nolabel="1">
<tree>
<button type="object" name="get_formview_action" icon="fa-external-link" title="open pr"/>
<field name="display_name"/>
<field name="state"/>
</tree>
</field>
<separator string="Stagings (commits)"/>
<field name="commit_ids" nolabel="1" readonly="1">
<tree>
<button type="object" name="get_formview_action" icon="fa-external-link" title="open commit"/>
<field name="target"/>
<field name="id"/>
<field name="staged_at"/>
<field name="state"/>
</tree>
</field>
<separator string="Stagings (heads)"/>
<field name="head_ids" nolabel="1" readonly="1">
<tree>
<button type="object" name="get_formview_action" icon="fa-external-link" title="open head"/>
<field name="target"/>
<field name="id"/>
<field name="staged_at"/>
<field name="state"/>
</tree>
</field>
</sheet>
</form>
</field>
</record>
<menuitem name="Mergebot" id="runbot_merge_menu"/>
<menuitem name="Projects" id="runbot_merge_menu_project"
parent="runbot_merge_menu"
action="runbot_merge_action_projects"/>
<menuitem name="Mergebot" id="runbot_merge_menu">
<menuitem name="Pull Requests" id="runbot_merge_menu_prs"
parent="runbot_merge_menu"
action="runbot_merge_action_prs"/>
action="runbot_merge_action_prs" sequence="5"/>
<menuitem name="Stagings" id="runbot_merge_menu_stagings"
parent="runbot_merge_menu"
action="runbot_merge_action_stagings"/>
action="runbot_merge_action_stagings" sequence="8"/>
<menuitem name="Projects" id="runbot_merge_menu_project"
action="runbot_merge_action_projects"/>
<menuitem name="Batches" id="runbot_merge_menu_batches"
action="runbot_merge_action_batches"/>
<menuitem name="Commits" id="runbot_merge_menu_commits"
parent="runbot_merge_menu"
action="runbot_merge_action_commits"/>
</menuitem>
</odoo>

View File

@ -1,5 +1,5 @@
<odoo>
<record id="runbot_merge_tree_partner" model="ir.ui.view">
<record id="runbot_merge_search_partner" model="ir.ui.view">
<field name="name">Improve search on partners</field>
<field name="model">res.partner</field>
<field name="inherit_id" ref="base.view_res_partner_filter"/>
@ -25,6 +25,7 @@
<field name="display_name" string="Name"/>
<field name="github_login"/>
<field name="review_rights" widget="many2many_tags"/>
<field name="user_id" invisible="1"/>
</tree>
</xpath>
</field>
@ -35,20 +36,23 @@
<field name="inherit_id" ref="base.view_partner_form"/>
<field name="arch" type="xml">
<xpath expr="//sheet" position="before">
<field name="github_login" invisible="1"/>
<field name="review_rights" invisible="1"/>
<header>
<button type="object" name="fetch_github_email"
string="Fetch Github Email" class="oe_highlight"
attrs="{'invisible': ['|', ('email', '!=', False), ('github_login', '=', False)]}"
invisible="email or not github_login"
/>
</header>
<div class="alert alert-warning" role="alert"
attrs="{'invisible': ['|', ('email', '!=', False), ('review_rights', '=', [])]}">
invisible="email or not review_rights">
Reviewers must have an email address set! Without an email
configured, reviews will be ignored.
</div>
</xpath>
<xpath expr="//notebook" position="inside">
<page string="Mergebot" groups="runbot_merge.group_admin">
<field name="override_sensitive" invisible="1"/>
<group>
<group>
<field name="github_login"/>
@ -56,7 +60,12 @@
</group>
<group>
<group colspan="4" string="Review Rights">
<field name="review_rights" nolabel="1">
<div colspan="4" class="alert alert-warning" role="alert" invisible="not review_rights">
Review access requires successfully following
the Code Review (QDP) and Security (DLE)
trainings. Please check before giving r+ access.
</div>
<field colspan="4" name="review_rights" nolabel="1">
<tree string="Review ACLs" editable="bottom">
<field name="repository_id"/>
<field name="review"/>
@ -65,7 +74,12 @@
</field>
</group>
<group colspan="4">
<field name="override_rights" widget="many2many_tags"/>
<div colspan="4" class="alert alert-danger" role="alert" invisible="not override_sensitive">
Security Override <b>REQUIRES</b> successfully
following the Security training. Please ask DLE
before granting access.
</div>
<field colspan="4" name="override_rights" widget="many2many_tags"/>
</group>
</group>
<group>

View File

@ -8,10 +8,10 @@
<header>
<button type="object" name="action_prepare_freeze"
string="Freeze"
attrs="{'invisible': [('freeze_id', '!=', False)]}"/>
invisible="freeze_id"/>
<button type="object" name="action_prepare_freeze"
string="View Freeze" class="oe_highlight"
attrs="{'invisible': [('freeze_id', '=', False)]}"/>
invisible="not freeze_id"/>
</header>
<sheet>
<div class="oe_title">
@ -25,9 +25,21 @@
<group>
<group>
<field name="github_token"/>
<field name="secret"/>
<field name="github_name" readonly="0"
help="Identity when creating new commits, defaults to github name, falls back to login."/>
<field name="github_email" readonly="0"
help="Identity when creating new commits, defaults to public email, falls back to primary email."/>
<span invisible="not (staging_statuses and staging_rpc)" class="alert alert-warning" role="alert">
Avoid overlaps between GH and RPC as the older
GH statuses may overwrite more recent RPC statuses.
</span>
<field name="staging_statuses" string="Validate via GH statuses"/>
<field name="staging_rpc" string="Validate via direct RPC"/>
</group>
<group>
<field name="staging_enabled" widget="boolean_toggle"/>
<field name="staging_priority"/>
<field name="uniquifier"/>
<field name="ci_timeout"/>
<field name="batch_limit"/>
</group>
@ -46,6 +58,7 @@
<field name="repo_ids">
<tree>
<field name="sequence" widget="handle"/>
<button type="object" name="get_formview_action" icon="fa-external-link" title="open repo"/>
<field name="name"/>
<field name="branch_filter"/>
<field name="status_ids" widget="many2many_tags"/>
@ -55,8 +68,10 @@
<field name="branch_ids">
<tree editable="bottom" decoration-muted="not active">
<field name="sequence" widget="handle" />
<button type="object" name="get_formview_action" icon="fa-external-link" title="open branch"/>
<field name="name"/>
<field name="active"/>
<field name="active" widget="boolean_toggle"/>
<field name="staging_enabled" widget="boolean_toggle"/>
</tree>
</field>
</sheet>

View File

@ -7,11 +7,19 @@
<template id="link-pr" name="create a link to `pr`">
<t t-set="title">
<t t-if="pr.repository.group_id &lt;= env.user.groups_id">
<t t-esc="pr.message.split('\n')[0]"/>
<t t-out="pr.message.split('\n', 1)[0]"/>
</t>
</t>
<t t-set="title">
<t t-if="title.strip() and pr.blocked" >
<t t-out="title.strip()"/>: <t t-out="pr.blocked"/>
</t>
<t t-else="">
<t t-out="pr.blocked or title.strip()"/>
</t>
</t>
<a t-attf-href="https://github.com/{{ pr.repository.name }}/pull/{{ pr.number }}"
t-att-title="pr.blocked or title.strip()"
t-att-title="title"
t-att-target="target or None"
t-att-class="classes or None"
><t t-esc="pr.display_name"/></a>
@ -24,7 +32,7 @@
data-toggle="dropdown"
aria-haspopup="true"
aria-expanded="true"
t-attf-title="Staged at {{staging.staged_at}}Z"
t-attf-title="Staged at {{staging.staged_at}}Z for {{round(staging.staging_duration)}}s"
>
<t t-out="0"/>
<span class="caret"></span>
@ -72,7 +80,7 @@
<t t-call="website.layout">
<div id="wrap"><div class="container-fluid">
<t t-call="runbot_merge.alerts"/>
<section t-foreach="projects.with_context(active_test=False)" t-as="project" class="row">
<section t-foreach="projects" t-as="project" class="row">
<h1 class="col-md-12"><t t-esc="project.name"/></h1>
<div class="col-md-12">
key:
@ -154,7 +162,7 @@
<template id="stagings" name="mergebot branch stagings">
<t t-set="repo_statuses" t-value="branch.project_id.repo_ids.having_branch(branch).status_ids"/>
<ul class="list-unstyled stagings">
<t t-foreach="branch.env['runbot_merge.stagings'].search([('target', '=', branch.id)], order='staged_at desc', limit=6)" t-as="staging">
<t t-foreach="stagings_map[branch]" t-as="staging">
<t t-set="success" t-value="staging.state == 'success'"/>
<t t-set="failure" t-value="staging.state == 'failure'"/>
<t t-set="pending" t-value="staging.active and (not staging.state or staging.state == 'pending')"/>
@ -187,6 +195,11 @@
</ul>
<t t-call="runbot_merge.staging-statuses">
Staged <span t-field="staging.staged_at" t-options="{'widget': 'relative'}"/>
(duration <span t-field="staging.staging_duration" t-options="{
'widget': 'duration',
'format': 'short',
'round': 'minute'
}"/>)
</t>
</li>
</t>
@ -199,6 +212,18 @@
<section class="row">
<h1 class="col-md-12"><t t-esc="branch.project_id.name"/>: <t t-esc="branch.name"/></h1>
</section>
<form method="get">
<label for="until">Staged before:</label>
<input type="datetime-local" name="until" t-att-value="until"/>
(UTC)
<label for="state">State:</label>
<select name="state">
<option t-att-selected="'selected' if not state else None"/>
<option t-att-selected="'selected' if state == 'success' else None" value="success">Success</option>
<option t-att-selected="'selected' if state == 'failure' else None" value="failure">Failure</option>
</select>
<button type="submit">Apply</button>
</form>
<table>
<t t-foreach="stagings" t-as="staging">
<t t-set="success"
@ -236,6 +261,11 @@
<t t-call="runbot_merge.staging-statuses">
<span t-field="staging.staged_at"
t-options="{'format': 'yyyy-MM-dd\'T\'HH:mm:ssZ'}"/>
in <span t-field="staging.staging_duration" t-options="{
'widget': 'duration',
'format': 'narrow',
'round': 'minute'
}"/>
</t>
</th>
<td>
@ -270,7 +300,7 @@
</t>
</table>
<t t-if="next">
<a t-attf-href="/runbot_merge/{{branch.id}}?until={{next}}">
<a t-attf-href="/runbot_merge/{{branch.id}}?until={{next}}&amp;state={{state}}">
Next >
</a>
</t>
@ -299,6 +329,15 @@
<t t-if="merged_head">
at <a t-attf-href="https://github.com/{{pr.repository.name}}/commit/{{merged_head}}"><t t-esc="merged_head"/></a>
</t>
<p>Statuses:</p>
<ul>
<t t-foreach="pr.repository.status_ids._for_pr(pr)" t-as="ci">
<t t-set="st" t-value="statuses.get(ci.context.strip())"/>
<li t-if="st">
<a t-att-href="st.get('target_url') if st else None"><t t-esc="ci.context.strip()"/></a><t t-if="st and st.get('description')">: <t t-esc="st['description']"/></t>
</li>
</t>
</ul>
<t t-set="linked_prs" t-value="pr._linked_prs"/>
<div t-if="linked_prs">
@ -319,7 +358,7 @@
<template id="view_pull_request_info_error">
<div class="alert alert-danger">
Error:
<span t-esc="pr.with_context(active_test=False).batch_ids[-1:].staging_id.reason">
<span t-esc="pr.with_context(active_test=False).batch_id.staging_ids[-1:].reason">
Unable to stage PR
</span>
</div>
@ -391,7 +430,7 @@
</a>
<a t-attf-href="/web#view_type=form&amp;model=runbot_merge.pull_requests&amp;id={{pr.id}}"
class="btn btn-sm btn-secondary align-top float-right"
groups="base.group_user">View in backend</a>
groups="runbot_merge.group_admin">View in backend</a>
</h1>
<h6>Created by <span t-field="pr.author.display_name"/></h6>
<t t-set="tmpl">
@ -400,17 +439,192 @@
<t t-else="">open</t>
</t>
<t t-call="runbot_merge.view_pull_request_info_{{tmpl.strip()}}"/>
<t t-set="target_cls" t-value="None if pr.target.active else 'text-muted bg-warning'"/>
<dl class="runbot-merge-fields">
<dt>label</dt>
<dd><span t-field="pr.label"/></dd>
<dt>head</dt>
<dd><a t-attf-href="{{pr.github_url}}/commits/{{pr.head}}"><span t-field="pr.head"/></a></dd>
<dt t-att-class="target_cls">target</dt>
<dd t-att-class="target_cls"><span t-field="pr.target"/></dd>
</dl>
<p t-field="pr.message"/>
<t t-call="runbot_merge.dashboard-table"/>
<p t-field="pr.message_html"/>
</div></div>
</t>
</template>
<record id="dashboard-pre" model="ir.actions.server">
<field name="name">Preparation for the preparation of the PR dashboard content</field>
<field name="state">code</field>
<field name="model_id" ref="base.model_ir_qweb"/>
<field name="code"><![CDATA[
project = pr.repository.project_id
genealogy = pr.batch_id.genealogy_ids
repos = project.repo_ids & genealogy.all_prs.repository
targets = genealogy.all_prs.target
if not genealogy:
# if a PR is closed, it may not have a batch to get a genealogy from,
# in which case it's just a sole soul drifting in the deep dark
branches = pr.target
repos = pr.repository
elif all(p.state in ('merged', 'closed') for p in genealogy[-1].all_prs):
branches = (project.branch_ids & targets)[::-1]
else:
# if the tip of the genealogy is not closed, extend to the furthest limit,
# keeping branches which are active or have an associated batch / PR
limit = min(genealogy.prs.limit_id, key=lambda b: (b.sequence, b.name), default=None)
limit_high = project.branch_ids.ids.index(limit.id) if limit else None
limit = max(targets, key=lambda b: (b.sequence, b.name))
limit_low = project.branch_ids.ids.index(limit.id)
branches = project.branch_ids[limit_high:limit_low+1].filtered(lambda b: b.active or b in targets)[::-1]
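# server actions hand back their result via `action`: this 4-tuple is
# unpacked by the dashboard-table template as (project, repos, branches,
# genealogy)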
action = (project, repos, branches, genealogy)
]]></field>
</record>
<record id="dashboard-prep" model="ir.actions.server">
<field name="name">Preparation of the PR dashboard content</field>
<field name="state">code</field>
<field name="model_id" ref="base.model_ir_qweb"/>
<field name="code"><![CDATA[
batches = {}
for branch in [*branches, branches.browse(())]:
if genealogy:
prs_batch = genealogy.filtered(lambda b: b.target == branch).all_prs
if not (branch or prs_batch):
continue
else:
prs_batch = pr
for repo in repos:
prs = prs_batch.filtered(lambda p: p.repository == repo)
st = 0
detached = False
pr_fmt = []
for p in prs:
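# pack the per-PR flags into a bitfield: bit 2 = error, bit 1 = merged, bit 0 = blocked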
st |= (bool(p.error) << 2 | (p.state == 'merged') << 1 | bool(p.blocked) << 0)
done = p.state in ('closed', 'merged')
# this will hide the detachment signal when the PRs are merged/closed, cleaner but less correct?
detached = detached or bool(p.source_id and not p.parent_id and not done)
label = p.state
if p.blocked:
label = "%s, %s" % (label, p.blocked)
pr_fmt.append({
'pr': p,
'number': p.number,
'label': label,
'closed': p.closed,
'backend_url': "/web#view_type=form&model=runbot_merge.pull_requests&id=%d" % p.id,
'github_url': p.github_url,
'checked': done or p.status == 'success',
'reviewed': done or bool(p.reviewed_by),
'attached': done or p.parent_id or not p.source_id,
})
state = None
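# scan the flags from highest priority down: error (danger) wins over merged (success) over blocked (warning)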
for i, s in zip(range(2, -1, -1), ['danger', 'success', 'warning']):
if st & (1 << i):
state = s
break
batches[repo, branch] = {
'active': pr in prs,
'detached': detached,
'state': state,
'prs': pr_fmt,
'pr_ids': prs,
}
action = batches
]]></field>
</record>
<template id="dashboard-table">
<t t-set="pre" t-value="pr.env.ref('runbot_merge.dashboard-pre').sudo()._run_action_code_multi({'pr': pr})"/>
<t t-set="repos" t-value="pre[1]"/>
<t t-set="branches" t-value="pre[2]"/>
<t t-set="batches" t-value="env.ref('runbot_merge.dashboard-prep').sudo()._run_action_code_multi({
'pr': pr,
'repos': repos,
'branches': branches,
'genealogy': pre[3],
})"/>
<div t-if="not pr.batch_id.target" class="alert alert-danger">
<p>Inconsistent targets:</p>
<ul><li t-foreach="pr.batch_id.prs" t-as="p">
<a t-att-href="p.url"><t t-out="p.display_name"/></a> has target '<t t-out="p.target.name"/>'</li></ul>
</div>
<table t-else="" class="table table-bordered table-sm">
<colgroup>
<col/>
<col t-foreach="repos" t-as="repo"
t-att-class="'bg-info' if repo == pr.repository else None"
/>
</colgroup>
<thead>
<tr>
<th/>
<th t-foreach="repos" t-as="repo">
<t t-out="repo.name"/>
</th>
</tr>
</thead>
<tbody>
<!--
table-info looks like shit (possibly because of no odoo styling) so use bg-info;
text-muted doesn't do anything, so set some opacity
-->
<tr t-foreach="branches" t-as="branch"
t-att-title="None if branch.active else 'branch is disabled'"
t-attf-class="{{
'bg-info' if branch == pr.target else ''
}} {{
'inactive' if not branch.active else ''
}}">
<td t-out="branch.name or ''"/>
<t t-foreach="repos" t-as="repo">
<t t-set="ps" t-value="batches[repo, branch]"/>
<t t-set="stateclass" t-value="ps['state'] and 'table-'+ps['state']"/>
<t t-set="detached" t-value="ps['detached']"/>
<td t-if="ps['prs']"
t-att-title="'detached' if detached else None"
t-attf-class="{{
'table-active' if ps['active'] else ''
}} {{
'detached' if detached else ''
}} {{stateclass}}">
<!--
there should be only one PR per (repo, target) but
that's not always the case
-->
<span t-foreach="ps['prs']" t-as="p"
t-att-title="p['label']"
t-att-class="'closed' if p['closed'] else None">
<a t-attf-href="/{{repo.name}}/pull/{{p['number']}}">#<t t-out="p['number']"/></a>
<a t-attf-class="fa fa-brands fa-github"
title="Open on Github"
t-att-href="p['github_url']"
/>
<a groups="runbot_merge.group_admin"
title="Open in Backend"
t-attf-class="fa fa-external-link"
t-att-href="p['backend_url']"
/>
<sup t-if="not p['checked']" class="text-danger">missing statuses</sup>
<sup t-if="not p['reviewed']" class="text-danger">missing r+</sup>
<sup t-if="not p['attached']"
t-attf-title="detached: {{p['pr'].detach_reason}}"
class="text-warning fa fa-unlink"/>
<sup t-if="p['pr'].staging_id" class="text-success">
staged
</sup>
<sup t-elif="p['pr']._ready" class="text-success">
ready
</sup>
</span>
</td>
<td t-else=""/>
</t>
</tr>
</tbody>
</table>
</template>
</odoo>