diff --git a/conftest.py b/conftest.py index c47c07a0..8a770095 100644 --- a/conftest.py +++ b/conftest.py @@ -1,4 +1,12 @@ -# -*- coding: utf-8 -*- +from __future__ import annotations + +import datetime +import errno +import select +import shutil +import threading +from typing import Optional + """ Configuration: @@ -46,6 +54,7 @@ import collections import configparser import contextlib import copy +import fcntl import functools import http.client import itertools @@ -64,7 +73,6 @@ import warnings import xmlrpc.client from contextlib import closing -import psutil import pytest import requests @@ -79,7 +87,7 @@ def pytest_addoption(parser): parser.addoption('--coverage', action='store_true') parser.addoption( - '--tunnel', action="store", type="choice", choices=['', 'ngrok', 'localtunnel'], default='', + '--tunnel', action="store", choices=['', 'ngrok', 'localtunnel'], default='', help="Which tunneling method to use to expose the local Odoo server " "to hook up github's webhook. ngrok is more reliable, but " "creating a free account is necessary to avoid rate-limiting " @@ -88,11 +96,27 @@ def pytest_addoption(parser): "blow through the former); localtunnel has no rate-limiting but " "the servers are way less reliable") +def is_manager(config): + return not hasattr(config, 'workerinput') -# noinspection PyUnusedLocal def pytest_configure(config): sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'mergebot_test_utils')) + config.addinivalue_line( + "markers", + "expect_log_errors(reason): allow and require tracebacks in the log", + ) + config.addinivalue_line( + "markers", + "defaultstatuses: use the statuses `default` rather than `ci/runbot,legal/cla`", + ) +def pytest_unconfigure(config): + if not is_manager(config): + return + + for c in config._tmp_path_factory.getbasetemp().iterdir(): + if c.is_file() and c.name.startswith('template-'): + subprocess.run(['dropdb', '--if-exists', c.read_text(encoding='utf-8')]) @pytest.fixture(scope='session', autouse=True) def _set_socket_timeout(): @@ -143,6 +167,14 @@ def rolemap(request, config): @pytest.fixture def partners(env, config, rolemap): + """This specifically does not create partners for ``user`` and ``other`` + so they can be generated on-interaction, as "external" users. + + The two differ in that ``user`` has ownership of the org and can manage + repos there, ``other`` is completely unrelated to anything so useful to + check for interaction where the author only has read access to the reference + repositories. 
+ """ m = {} for role, u in rolemap.items(): if role in ('user', 'other'): @@ -187,6 +219,7 @@ def tunnel(pytestconfig, port): if tunnel == '': yield f'http://localhost:{port}' elif tunnel == 'ngrok': + own = None web_addr = 'http://localhost:4040/api' addr = 'localhost:%d' % port # try to find out if ngrok is running, and if it's not attempt @@ -195,13 +228,9 @@ def tunnel(pytestconfig, port): # FIXME: this is for xdist to avoid workers running ngrok at the # exact same time, use lockfile instead time.sleep(random.SystemRandom().randint(1, 10)) - # FIXME: use config file so we can set web_addr to something else - # than localhost:4040 (otherwise we can't disambiguate - # between the ngrok we started and an ngrok started by - # some other user) requests.get(web_addr) except requests.exceptions.ConnectionError: - subprocess.Popen(NGROK_CLI, stdout=subprocess.DEVNULL) + own = subprocess.Popen(NGROK_CLI, stdout=subprocess.DEVNULL) for _ in range(5): time.sleep(1) with contextlib.suppress(requests.exceptions.ConnectionError): @@ -213,8 +242,8 @@ def tunnel(pytestconfig, port): requests.post(f'{web_addr}/tunnels', json={ 'name': str(port), 'proto': 'http', - 'bind_tls': True, # only https 'addr': addr, + 'schemes': ['https'], 'inspect': True, }).raise_for_status() @@ -242,17 +271,14 @@ def tunnel(pytestconfig, port): raise TimeoutError("ngrok tunnel deletion failed") r = requests.get(f'{web_addr}/tunnels') + assert r.ok, f'{r.reason} {r.text}' # there are still tunnels in the list -> bail - if r.ok and r.json()['tunnels']: + if not own or r.json()['tunnels']: return - # ngrok is broken or all tunnels have been shut down -> try to - # find and kill it (but only if it looks a lot like we started it) - for p in psutil.process_iter(): - if p.name() == 'ngrok' and p.cmdline() == NGROK_CLI: - p.terminate() - break - return + # no more tunnels and we started ngrok -> try to kill it + own.terminate() + own.wait(30) else: raise TimeoutError("ngrok tunnel creation failed (?)") elif tunnel == 'localtunnel': @@ -269,39 +295,73 @@ def tunnel(pytestconfig, port): raise ValueError("Unsupported %s tunnel method" % tunnel) class DbDict(dict): - def __init__(self, adpath): + def __init__(self, adpath, shared_dir): super().__init__() self._adpath = adpath + self._shared_dir = shared_dir def __missing__(self, module): - self[module] = db = 'template_%s' % uuid.uuid4() - with tempfile.TemporaryDirectory() as d: + with contextlib.ExitStack() as atexit: + f = atexit.enter_context(os.fdopen(os.open( + self._shared_dir / f'template-{module}', + os.O_CREAT | os.O_RDWR + ), mode="r+", encoding='utf-8')) + fcntl.lockf(f, fcntl.LOCK_EX) + atexit.callback(fcntl.lockf, f, fcntl.LOCK_UN) + + db = f.read() + if db: + self[module] = db + return db + + d = (self._shared_dir / f'shared-{module}') + d.mkdir() + self[module] = db = 'template_%s' % uuid.uuid4() subprocess.run([ 'odoo', '--no-http', - '--addons-path', self._adpath, - '-d', db, '-i', module + ',auth_oauth', + *(['--addons-path', self._adpath] if self._adpath else []), + '-d', db, '-i', module + ',saas_worker,auth_oauth', '--max-cron-threads', '0', '--stop-after-init', - '--log-level', 'warn' + '--log-level', 'warn', + '--log-handler', 'py.warnings:ERROR', ], check=True, - env={**os.environ, 'XDG_DATA_HOME': d} + env={**os.environ, 'XDG_DATA_HOME': str(d)} ) + f.write(db) + f.flush() + os.fsync(f.fileno()) + subprocess.run(['psql', db, '-c', "UPDATE ir_cron SET nextcall = 'infinity'"]) + return db @pytest.fixture(scope='session') -def dbcache(request): +def 
dbcache(request, tmp_path_factory, addons_path): """ Creates template DB once per run, then just duplicates it before starting odoo and running the testcase """ - dbs = DbDict(request.config.getoption('--addons-path')) + shared_dir = tmp_path_factory.getbasetemp() + if not is_manager(request.config): + # xdist workers get a subdir as their basetemp, so we need to go one + # level up to deref it + shared_dir = shared_dir.parent + + dbs = DbDict(addons_path, shared_dir) yield dbs - for db in dbs.values(): - subprocess.run(['dropdb', db], check=True) @pytest.fixture -def db(request, module, dbcache): +def db(request, module, dbcache, tmpdir): + template_db = dbcache[module] rundb = str(uuid.uuid4()) - subprocess.run(['createdb', '-T', dbcache[module], rundb], check=True) + subprocess.run(['createdb', '-T', template_db, rundb], check=True) + share = tmpdir.mkdir('share') + shutil.copytree( + str(dbcache._shared_dir / f'shared-{module}'), + str(share), + dirs_exist_ok=True, + ) + (share / 'Odoo' / 'filestore' / template_db).rename( + share / 'Odoo' / 'filestore' / rundb) yield rundb @@ -323,12 +383,14 @@ def wait_for_server(db, port, proc, mod, timeout=120): try: uid = xmlrpc.client.ServerProxy( - 'http://localhost:{}/xmlrpc/2/common'.format(port))\ - .authenticate(db, 'admin', 'admin', {}) + f'http://localhost:{port}/xmlrpc/2/common' + ).authenticate(db, 'admin', 'admin', { + 'base_location': f"http://localhost:{port}", + }) mods = xmlrpc.client.ServerProxy( - 'http://localhost:{}/xmlrpc/2/object'.format(port))\ - .execute_kw( - db, uid, 'admin', 'ir.module.module', 'search_read', [ + f'http://localhost:{port}/xmlrpc/2/object' + ).execute_kw( + db, uid, 'admin', 'ir.module.module', 'search_read', [ [('name', '=', mod)], ['state'] ]) if mods and mods[0].get('state') == 'installed': @@ -344,39 +406,128 @@ def port(): s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) return s.getsockname()[1] +@pytest.fixture +def page(port): + with requests.Session() as s: + def get(url): + r = s.get('http://localhost:{}{}'.format(port, url)) + r.raise_for_status() + return r.content + yield get + @pytest.fixture(scope='session') def dummy_addons_path(): with tempfile.TemporaryDirectory() as dummy_addons_path: mod = pathlib.Path(dummy_addons_path, 'saas_worker') mod.mkdir(0o700) - (mod / '__init__.py').write_bytes(b'') + (mod / '__init__.py').write_text('''\ +import builtins +import logging +import threading + +import psycopg2 + +import odoo +from odoo import api, fields, models + +_logger = logging.getLogger(__name__) + + +class Base(models.AbstractModel): + _inherit = 'base' + + def run_crons(self): + builtins.forwardport_merged_before = self.env.context.get('forwardport_merged_before') + builtins.forwardport_updated_before = self.env.context.get('forwardport_updated_before') + self.env['ir.cron']._process_jobs(self.env.cr.dbname) + del builtins.forwardport_updated_before + del builtins.forwardport_merged_before + return True + + +class IrCron(models.Model): + _inherit = 'ir.cron' + + @classmethod + def _process_jobs(cls, db_name): + t = threading.current_thread() + try: + db = odoo.sql_db.db_connect(db_name) + t.dbname = db_name + with db.cursor() as cron_cr: + # FIXME: override `_get_all_ready_jobs` to directly lock the cron? 
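+                    # process ready jobs one at a time, re-querying the ready
+                    # list after each job so jobs becoming ready meanwhile are
+                    # picked up within the same pass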
+ while jobs := next(( + job + for j in cls._get_all_ready_jobs(cron_cr) + if (job := cls._acquire_one_job(cron_cr, (j['id'],))) + ), None): + # take into account overridings of _process_job() on that database + registry = odoo.registry(db_name) + registry[cls._name]._process_job(db, cron_cr, job) + cron_cr.commit() + + except psycopg2.ProgrammingError as e: + raise + except Exception: + _logger.warning('Exception in cron:', exc_info=True) + finally: + if hasattr(t, 'dbname'): + del t.dbname +''', encoding='utf-8') (mod / '__manifest__.py').write_text(pprint.pformat({ 'name': 'dummy saas_worker', 'version': '1.0', }), encoding='utf-8') (mod / 'util.py').write_text("""\ -def from_role(_): +def from_role(*_, **__): return lambda fn: fn """, encoding='utf-8') yield dummy_addons_path +@pytest.fixture(scope='session') +def addons_path(request, dummy_addons_path): + return ','.join(map(str, filter(None, [ + request.config.getoption('--addons-path'), + dummy_addons_path, + ]))) + @pytest.fixture -def server(request, db, port, module, dummy_addons_path, tmpdir): +def server(request, db, port, module, addons_path, tmpdir): log_handlers = [ 'odoo.modules.loading:WARNING', + 'py.warnings:ERROR', ] if not request.config.getoption('--log-github'): log_handlers.append('github_requests:WARNING') - addons_path = ','.join(map(str, [ - request.config.getoption('--addons-path'), - dummy_addons_path, - ])) - cov = [] if request.config.getoption('--coverage'): - cov = ['coverage', 'run', '-p', '--source=odoo.addons.runbot_merge,odoo.addons.forwardport', '--branch'] + cov = [ + 'coverage', 'run', + '-p', '--branch', + '--source=odoo.addons.runbot_merge,odoo.addons.forwardport', + '--context', request.node.nodeid, + '-m', + ] + + r, w = os.pipe2(os.O_NONBLOCK) + buf = bytearray() + def _move(inpt=r, output=sys.stdout.fileno()): + while p.poll() is None: + readable, _, _ = select.select([inpt], [], [], 1) + if readable: + r = os.read(inpt, 4096) + if not r: + break + try: + os.write(output, r) + except OSError as e: + if e.errno == errno.EBADF: + break + raise + buf.extend(r) + os.close(inpt) p = subprocess.Popen([ *cov, @@ -385,25 +536,46 @@ def server(request, db, port, module, dummy_addons_path, tmpdir): '-d', db, '--max-cron-threads', '0', # disable cron threads (we're running crons by hand) *itertools.chain.from_iterable(('--log-handler', h) for h in log_handlers), - ], env={ + ], stderr=w, env={ **os.environ, # stop putting garbage in the user dirs, and potentially creating conflicts # TODO: way to override this with macOS? 
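+        # the `db` fixture already seeded `tmpdir/share` with the template's
+        # filestore, so reuse it rather than creating a fresh directory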
- 'XDG_DATA_HOME': str(tmpdir.mkdir('share')), + 'XDG_DATA_HOME': str(tmpdir / 'share'), 'XDG_CACHE_HOME': str(tmpdir.mkdir('cache')), }) + os.close(w) + # start the reader thread here so `_move` can read `p` without needing + # additional handholding + threading.Thread(target=_move, daemon=True).start() try: wait_for_server(db, port, p, module) - yield p + yield p, buf finally: p.terminate() p.wait(timeout=30) @pytest.fixture -def env(port, server, db, default_crons): - yield Environment(port, db, default_crons) +def env(request, port, server, db): + yield Environment(port, db) + if request.node.get_closest_marker('expect_log_errors'): + if b"Traceback (most recent call last):" not in server[1]: + pytest.fail("should have found error in logs.") + else: + if b"Traceback (most recent call last):" in server[1]: + pytest.fail("unexpected error in logs, fix, or mark function as `expect_log_errors` to require.") + +@pytest.fixture +def reviewer_admin(env, partners): + env['res.users'].create({ + 'partner_id': partners['reviewer'].id, + 'login': 'reviewer', + 'groups_id': [ + (4, env.ref("base.group_user").id, 0), + (4, env.ref("runbot_merge.group_admin").id, 0), + ], + }) def check(response): assert response.ok, response.text or response.reason @@ -412,6 +584,10 @@ def check(response): # to) break the existing local tests @pytest.fixture def make_repo(capsys, request, config, tunnel, users): + """Fixtures which creates a repository on the github side, plugs webhooks + in, and registers the repository for deletion on cleanup (unless + ``--no-delete`` is set) + """ owner = config['github']['owner'] github = requests.Session() github.headers['Authorization'] = 'token %s' % config['github']['token'] @@ -489,7 +665,6 @@ def _rate_limited(req): if not q.ok and q.headers.get('X-RateLimit-Remaining') == '0': reset = int(q.headers['X-RateLimit-Reset']) delay = max(0, round(reset - time.time() + 1.0)) - print("Hit rate limit, sleeping for", delay, "seconds") time.sleep(delay) continue break @@ -505,6 +680,9 @@ class Repo: self.hook = False repos.append(self) + def __repr__(self): + return f'' + @property def owner(self): return self.name.split('/')[0] @@ -542,14 +720,13 @@ class Repo: assert self.hook r = self._session.get( 'https://api.github.com/repos/{}/hooks'.format(self.name)) - response = r.json() - assert 200 <= r.status_code < 300, response - [hook] = response + assert 200 <= r.status_code < 300, r.text + [hook] = r.json() r = self._session.patch('https://api.github.com/repos/{}/hooks/{}'.format(self.name, hook['id']), json={ 'config': {**hook['config'], 'secret': secret}, }) - assert 200 <= r.status_code < 300, r.json() + assert 200 <= r.status_code < 300, r.text def get_ref(self, ref): # differs from .commit(ref).id for the sake of assertion error messages @@ -574,7 +751,7 @@ class Repo: assert res['object']['type'] == 'commit' return res['object']['sha'] - def commit(self, ref): + def commit(self, ref: str) -> Commit: if not re.match(r'[0-9a-f]{40}', ref): if not ref.startswith(('heads/', 'refs/heads/')): ref = 'refs/heads/' + ref @@ -585,12 +762,11 @@ class Repo: ref = 'refs/' + ref r = self._session.get('https://api.github.com/repos/{}/commits/{}'.format(self.name, ref)) - response = r.json() - assert 200 <= r.status_code < 300, response + assert 200 <= r.status_code < 300, r.text - return self._commit_from_gh(response) + return self._commit_from_gh(r.json()) - def _commit_from_gh(self, gh_commit): + def _commit_from_gh(self, gh_commit: dict) -> Commit: c = gh_commit['commit'] return Commit( 
id=gh_commit['sha'], @@ -608,14 +784,14 @@ class Repo: :rtype: Dict[str, str] """ r = self._session.get('https://api.github.com/repos/{}/git/trees/{}'.format(self.name, commit.tree)) - assert 200 <= r.status_code < 300, r.json() + assert 200 <= r.status_code < 300, r.text # read tree's blobs tree = {} for t in r.json()['tree']: assert t['type'] == 'blob', "we're *not* doing recursive trees in test cases" r = self._session.get('https://api.github.com/repos/{}/git/blobs/{}'.format(self.name, t['sha'])) - assert 200 <= r.status_code < 300, r.json() + assert 200 <= r.status_code < 300, r.text tree[t['path']] = base64.b64decode(r.json()['content']).decode() return tree @@ -645,7 +821,7 @@ class Repo: 'required_pull_request_reviews': None, 'restrictions': None, }) - assert 200 <= r.status_code < 300, r.json() + assert 200 <= r.status_code < 300, r.text # FIXME: remove this (runbot_merge should use make_commits directly) def make_commit(self, ref, message, author, committer=None, tree=None, wait=True): @@ -748,7 +924,16 @@ class Repo: )).raise_for_status() return PR(self, number) - def make_pr(self, *, title=None, body=None, target, head, draft=False, token=None): + def make_pr( + self, + *, + title: Optional[str] = None, + body: Optional[str] = None, + target: str, + head: str, + draft: bool = False, + token: Optional[str] = None + ) -> PR: assert self.hook self.hook = 2 @@ -781,10 +966,9 @@ class Repo: }, headers=headers, ) - pr = r.json() - assert 200 <= r.status_code < 300, pr + assert 200 <= r.status_code < 300, r.text - return PR(self, pr['number']) + return PR(self, r.json()['number']) def post_status(self, ref, status, context='default', **kw): assert self.hook @@ -795,7 +979,7 @@ class Repo: 'context': context, **kw }) - assert 200 <= r.status_code < 300, r.json() + assert 200 <= r.status_code < 300, r.text def is_ancestor(self, sha, of): return any(c['sha'] == sha for c in self.log(of)) @@ -806,7 +990,7 @@ class Repo: 'https://api.github.com/repos/{}/commits'.format(self.name), params={'sha': ref_or_sha, 'page': page} ) - assert 200 <= r.status_code < 300, r.json() + assert 200 <= r.status_code < 300, r.text yield from r.json() if not r.links.get('next'): return @@ -874,7 +1058,7 @@ class PR: 'https://api.github.com/repos/{}/pulls/{}'.format(self.repo.name, self.number), headers=caching ) - assert r.ok, r.json() + assert r.ok, r.text if r.status_code == 304: return previous contents, caching = self._cache = r.json(), {} @@ -919,7 +1103,7 @@ class PR: @property def comments(self): r = self.repo._session.get('https://api.github.com/repos/{}/issues/{}/comments'.format(self.repo.name, self.number)) - assert 200 <= r.status_code < 300, r.json() + assert 200 <= r.status_code < 300, r.text return [Comment(c) for c in r.json()] @property @@ -936,7 +1120,7 @@ class PR: json={'body': body}, headers=headers, ) - assert 200 <= r.status_code < 300, r.json() + assert 200 <= r.status_code < 300, r.text return r.json()['id'] def edit_comment(self, cid, body, token=None): @@ -949,7 +1133,7 @@ class PR: json={'body': body}, headers=headers ) - assert 200 <= r.status_code < 300, r.json() + assert 200 <= r.status_code < 300, r.text wait_for_hook() def delete_comment(self, cid, token=None): @@ -961,7 +1145,7 @@ class PR: 'https://api.github.com/repos/{}/issues/comments/{}'.format(self.repo.name, cid), headers=headers ) - assert r.status_code == 204, r.json() + assert r.status_code == 204, r.text def _set_prop(self, prop, value, token=None): assert self.repo.hook @@ -985,7 +1169,7 @@ class PR: 
self.repo.name, self.number, )) - assert 200 <= r.status_code < 300, r.json() + assert 200 <= r.status_code < 300, r.text info = r.json() repo = self.repo @@ -1006,7 +1190,7 @@ class PR: json={'body': body, 'event': state,}, headers=headers ) - assert 200 <= r.status_code < 300, r.json() + assert 200 <= r.status_code < 300, r.text PRBranch = collections.namedtuple('PRBranch', 'repo branch') class LabelsProxy(collections.abc.MutableSet): @@ -1017,7 +1201,7 @@ class LabelsProxy(collections.abc.MutableSet): def _labels(self): pr = self._pr r = pr.repo._session.get('https://api.github.com/repos/{}/issues/{}/labels'.format(pr.repo.name, pr.number)) - assert r.ok, r.json() + assert r.ok, r.text return {label['name'] for label in r.json()} def __repr__(self): @@ -1043,14 +1227,14 @@ class LabelsProxy(collections.abc.MutableSet): r = pr.repo._session.post('https://api.github.com/repos/{}/issues/{}/labels'.format(pr.repo.name, pr.number), json={ 'labels': [label] }) - assert r.ok, r.json() + assert r.ok, r.text def discard(self, label): pr = self._pr assert pr.repo.hook r = pr.repo._session.delete('https://api.github.com/repos/{}/issues/{}/labels/{}'.format(pr.repo.name, pr.number, label)) # discard should do nothing if the item didn't exist in the set - assert r.ok or r.status_code == 404, r.json() + assert r.ok or r.status_code == 404, r.text def update(self, *others): pr = self._pr @@ -1059,14 +1243,13 @@ class LabelsProxy(collections.abc.MutableSet): r = pr.repo._session.post('https://api.github.com/repos/{}/issues/{}/labels'.format(pr.repo.name, pr.number), json={ 'labels': list(set(itertools.chain.from_iterable(others))) }) - assert r.ok, r.json() + assert r.ok, r.text class Environment: - def __init__(self, port, db, default_crons=()): + def __init__(self, port, db): self._uid = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/common'.format(port)).authenticate(db, 'admin', 'admin', {}) self._object = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/object'.format(port)) self._db = db - self._default_crons = default_crons def __call__(self, model, method, *args, **kwargs): return self._object.execute_kw( @@ -1078,17 +1261,30 @@ class Environment: def __getitem__(self, name): return Model(self, name) + def ref(self, xid, raise_if_not_found=True): + model, obj_id = self( + 'ir.model.data', 'check_object_reference', + *xid.split('.', 1), + raise_on_access_error=raise_if_not_found + ) + return Model(self, model, [obj_id]) if obj_id else None + + def run_crons(self, *xids, **kw): - crons = xids or self._default_crons - print('running crons', crons, file=sys.stderr) + crons = xids or ['runbot_merge.check_linked_prs_status'] + cron_ids = [] for xid in crons: - t0 = time.time() - print('\trunning cron', xid, '...', file=sys.stderr) + if xid is None: + continue + model, cron_id = self('ir.model.data', 'check_object_reference', *xid.split('.', 1)) assert model == 'ir.cron', "Expected {} to be a cron, got {}".format(xid, model) - self('ir.cron', 'method_direct_trigger', [cron_id], **kw) - print('\tdone %.3fs' % (time.time() - t0), file=sys.stderr) - print('done', file=sys.stderr) + cron_ids.append(cron_id) + if cron_ids: + self('ir.cron', 'write', cron_ids, { + 'nextcall': (datetime.datetime.utcnow() - datetime.timedelta(seconds=30)).isoformat(" ", "seconds") + }, **kw) + self('base', 'run_crons', [], **kw) # sleep for some time as a lot of crap may have happened (?) 
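+        # leave GitHub a moment to deliver the webhooks the crons triggered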
        wait_for_hook()

@@ -1117,6 +1313,9 @@ class Model:
     def __len__(self):
         return len(self._ids)

+    def __hash__(self):
+        return hash((self._model, frozenset(self._ids)))
+
     def __eq__(self, other):
         if not isinstance(other, Model):
             return NotImplemented
@@ -1144,9 +1343,13 @@ class Model:
     # because sorted is not xmlrpc-compatible (it doesn't downgrade properly)
     def sorted(self, field):
-        rs = self.read([field])
-        rs.sort(key=lambda r: r[field])
-        return Model(self._env, self._model, [r['id'] for r in rs])
+        fn = field if callable(field) else lambda r: r[field]
+
+        return Model(self._env, self._model, (
+            id
+            for record in sorted(self, key=fn)
+            for id in record.ids
+        ))

     def __getitem__(self, index):
         if isinstance(index, str):
diff --git a/forwardport/__manifest__.py b/forwardport/__manifest__.py
index 3ece49af..cb24f840 100644
--- a/forwardport/__manifest__.py
+++ b/forwardport/__manifest__.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 {
     'name': 'forward port bot',
-    'version': '1.2',
+    'version': '1.4',
     'summary': "A port which forward ports successful PRs.",
     'depends': ['runbot_merge'],
     'data': [
diff --git a/forwardport/changelog/2023-08/outstanding.md b/forwardport/changelog/2023-08/outstanding.md
new file mode 100644
index 00000000..2de5ef2c
--- /dev/null
+++ b/forwardport/changelog/2023-08/outstanding.md
@@ -0,0 +1,10 @@
+IMP: outstandings page
+
+- increased time-before-outstanding from 3 to 7 days, as 3~4 days is common in
+  normal operations, especially when merging from very low branches where
+  forward-porting may take a while
+- improved performance by optimising fetching & filtering
+- added counts to the main listing for clarity (instead of hiding them in a
+  popover)
+- added the *original authors* for the outstanding forward ports
+- added ability to filter by team, if such are configured
diff --git a/forwardport/controllers.py b/forwardport/controllers.py
index af464dcb..7d7515d1 100644
--- a/forwardport/controllers.py
+++ b/forwardport/controllers.py
@@ -1,7 +1,14 @@
+import collections
+import datetime
 import pathlib

+import werkzeug.urls
+
+from odoo.http import route, request
+from odoo.osv import expression
+
 from odoo.addons.runbot_merge.controllers.dashboard import MergebotDashboard

+DEFAULT_DELTA = datetime.timedelta(days=7)
+
 class Dashboard(MergebotDashboard):
     def _entries(self):
         changelog = pathlib.Path(__file__).parent / 'changelog'
@@ -13,3 +20,81 @@ class Dashboard(MergebotDashboard):
             for d in changelog.iterdir()
         ]

+    @route('/forwardport/outstanding', type='http', methods=['GET'], auth="user", website=True, sitemap=False)
+    def outstanding(self, partner=0, authors=True, reviewers=True, group=0):
+        Partners = request.env['res.partner']
+        PullRequests = request.env['runbot_merge.pull_requests']
+        partner = Partners.browse(int(partner))
+        group = Partners.browse(int(group))
+        authors = int(authors)
+        reviewers = int(reviewers)
+        link = lambda **kw: '?' + werkzeug.urls.url_encode({'partner': partner.id or 0, 'authors': authors, 'reviewers': reviewers, **kw, })
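+        # `link` rebuilds the query string, keeping the current filters unless
+        # explicitly overridden through `kw`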
+        groups = Partners.search([('is_company', '=', True), ('child_ids', '!=', False)])
+        if not (authors or reviewers):
+            return request.render('forwardport.outstanding', {
+                'authors': 0,
+                'reviewers': 0,
+                'single': partner,
+                'culprits': partner,
+                'groups': groups,
+                'current_group': group,
+                'outstanding': [],
+                'outstanding_per_author': {partner: 0},
+                'outstanding_per_reviewer': {partner: 0},
+                'link': link,
+            })
+
+        source_filter = [('merge_date', '<', datetime.datetime.now() - DEFAULT_DELTA)]
+        partner_filter = []
+        if partner or group:
+            if partner:
+                suffix = ''
+                arg = partner.id
+            else:
+                suffix = '.commercial_partner_id'
+                arg = group.id
+
+            if authors:
+                partner_filter.append([(f'author{suffix}', '=', arg)])
+            if reviewers:
+                partner_filter.append([(f'reviewed_by{suffix}', '=', arg)])
+
+            source_filter.extend(expression.OR(partner_filter))
+
+        outstanding = PullRequests.search([
+            ('state', 'in', ['opened', 'validated', 'approved', 'ready', 'error']),
+            ('source_id', 'in', PullRequests._search(source_filter)),
+        ])
+
+        outstanding_per_group = collections.Counter()
+        outstanding_per_author = collections.Counter()
+        outstanding_per_reviewer = collections.Counter()
+        outstandings = []
+        for source in outstanding.mapped('source_id').sorted('merge_date'):
+            prs = source.forwardport_ids.filtered(lambda p: p.state not in ['merged', 'closed'])
+            outstandings.append({
+                'source': source,
+                'prs': prs,
+            })
+            if authors:
+                outstanding_per_author[source.author] += len(prs)
+                outstanding_per_group[source.author.commercial_partner_id] += len(prs)
+            if reviewers and source:
+                outstanding_per_reviewer[source.reviewed_by] += len(prs)
+                outstanding_per_group[source.reviewed_by.commercial_partner_id] += len(prs)
+
+        culprits = Partners.browse(p.id for p, _ in (outstanding_per_reviewer + outstanding_per_author).most_common())
+        return request.render('forwardport.outstanding', {
+            'authors': authors,
+            'reviewers': reviewers,
+            'single': partner,
+            'culprits': culprits,
+            'groups': groups,
+            'current_group': group,
+            'outstanding_per_author': outstanding_per_author,
+            'outstanding_per_reviewer': outstanding_per_reviewer,
+            'outstanding_per_group': outstanding_per_group,
+            'outstanding': outstandings,
+            'link': link,
+        })
diff --git a/forwardport/data/crons.xml b/forwardport/data/crons.xml
index 1360914c..b3ce4591 100644
--- a/forwardport/data/crons.xml
+++ b/forwardport/data/crons.xml
<!-- [XML markup not recovered] the two forward-port queue crons go from
     running every minute to every 6 hours and gain explicit priorities 43
     and 46; a third cron goes from every hour to every 6 hours; the weekly
     "Maintenance of repo cache" cron (code: model._run()) is removed -->
diff --git a/forwardport/data/security.xml b/forwardport/data/security.xml
index 424e7991..99548e0c 100644
--- a/forwardport/data/security.xml
+++ b/forwardport/data/security.xml
<!-- [XML markup not recovered] drops the "Access to maintenance is useless"
     access record (all four permissions 0) along with the maintenance model -->
diff --git a/forwardport/data/views.xml b/forwardport/data/views.xml
index 6cb54653..f9d7f378 100644
--- a/forwardport/data/views.xml
+++ b/forwardport/data/views.xml
<!-- [XML markup not recovered] reworks the outstanding forward-ports
     dashboard: the static "Outstanding forward ports" qweb website.page at
     /forwardport/outstanding (key forwardport.outstanding_fp) is replaced by
     the controller above, and its "List of pull requests with outstanding
     forward ports" template is rebuilt to show, for each source PR, its
     author, who merged it, the outstanding forward-ports and the branches
     they target, with per-author/reviewer counts; the repository form keeps
     help="Repository where forward port branches will be created" and the
     "Show forwardport PR fields" view on runbot_merge.pull_requests is
     removed -->
diff --git a/forwardport/migrations/15.0.1.3/pre-migration.py b/forwardport/migrations/15.0.1.3/pre-migration.py
new file mode 100644
index 00000000..ab2fbdd4
--- /dev/null
+++ b/forwardport/migrations/15.0.1.3/pre-migration.py
@@ -0,0 +1,9 @@
+import pathlib
+
+from odoo.tools.appdirs import user_cache_dir
+
+
+def migrate(_cr, _version):
+    # avoid needing to re-clone our repo unnecessarily
+    pathlib.Path(user_cache_dir('forwardport')).rename(
+        pathlib.Path(user_cache_dir('mergebot')))
diff --git a/forwardport/migrations/15.0.1.4/pre-migration.py b/forwardport/migrations/15.0.1.4/pre-migration.py
new file mode 100644
index 00000000..b1084e5b
--- /dev/null
+++ b/forwardport/migrations/15.0.1.4/pre-migration.py
@@ -0,0 +1,7 @@
+def migrate(cr, version):
+    cr.execute("ALTER TABLE runbot_merge_project DROP COLUMN IF EXISTS fp_github_email")
+    cr.execute("""
+        ALTER TABLE runbot_merge_branch
+            DROP COLUMN IF EXISTS fp_sequence,
+            DROP COLUMN IF EXISTS fp_target
+    """)
diff --git a/forwardport/models/forwardport.py b/forwardport/models/forwardport.py
index f94b6db9..19a4ae46 100644
--- a/forwardport/models/forwardport.py
+++ b/forwardport/models/forwardport.py
@@ -1,20 +1,21 @@
 # -*- coding: utf-8 -*-
+import builtins
 import logging
-import pathlib
-import resource
-import subprocess
-import uuid
+import re
 from contextlib import ExitStack
 from datetime import datetime, timedelta

+import requests
+import sentry_sdk
 from dateutil import relativedelta

 from odoo import fields, models
+from odoo.addons.runbot_merge import git
 from odoo.addons.runbot_merge.github import GH
-from odoo.tools.appdirs import user_cache_dir

 # how long a merged PR survives
 MERGE_AGE = relativedelta.relativedelta(weeks=2)
+FOOTER = '\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n'

 _logger = logging.getLogger(__name__)

@@ -26,19 +27,34 @@ class Queue:
         raise NotImplementedError

     def _process(self):
-        for b in self.search(self._search_domain(), order='create_date, id', limit=self.limit):
+        skip = 0
+        from_clause, where_clause, params = self._search(self._search_domain(), order='create_date, id', limit=1).get_sql()
+        for _ in range(self.limit):
+            self.env.cr.execute(f"""
+                SELECT id FROM {from_clause}
+                WHERE {where_clause or "true"}
+                ORDER BY create_date, id
+                LIMIT 1 OFFSET %s
+                FOR UPDATE SKIP LOCKED
+            """, [*params, skip])
+            b = self.browse(self.env.cr.fetchone())
+            if not b:
+                return
+
             try:
-                b._process_item()
+                with sentry_sdk.start_span(description=self._name):
+                    b._process_item()
                 b.unlink()
                 self.env.cr.commit()
             except Exception:
                 _logger.exception("Error while processing %s, skipping", b)
                 self.env.cr.rollback()
-                b._on_failure()
+                if b._on_failure():
+                    skip += 1
                 self.env.cr.commit()

     def _on_failure(self):
-        pass
+        return True

     def _search_domain(self):
         return []

@@ -49,13 +65,25 @@ class ForwardPortTasks(models.Model, Queue):

     limit = 10

-    batch_id = fields.Many2one('runbot_merge.batch', required=True)
+    batch_id = fields.Many2one('runbot_merge.batch', required=True, index=True)
     source = fields.Selection([
         ('merge', 'Merge'),
         ('fp', 'Forward Port Followup'),
-        ('insert', 'New branch port')
+        ('insert', 'New branch port'),
+        ('complete', 'Complete ported batches'),
     ], required=True)
     retry_after = fields.Datetime(required=True, default='1900-01-01 01:01:01')
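+    # only set for 'complete' jobs: the PR newly added to an already-ported batch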
+    pr_id = fields.Many2one('runbot_merge.pull_requests')
+
+    def create(self, vals_list):
+        self.env.ref('forwardport.port_forward')._trigger()
+        return super().create(vals_list)
+
+    def write(self, vals):
+        if retry := vals.get('retry_after'):
+            self.env.ref('forwardport.port_forward')\
+                ._trigger(fields.Datetime.to_datetime(retry))
+        return super().write(vals)

     def _search_domain(self):
         return super()._search_domain() + [
@@ -68,44 +96,169 @@ class ForwardPortTasks(models.Model, Queue):

     def _process_item(self):
         batch = self.batch_id
-        newbatch = batch.prs._port_forward()
+        sentry_sdk.set_tag('forward-porting', batch.prs.mapped('display_name'))
+        if self.source == 'complete':
+            self._complete_batches()
+            return

-        if newbatch:
-            _logger.info(
-                "Processing %s (from %s): %s (%s) -> %s (%s)",
-                self.id, self.source,
-                batch, batch.prs,
-                newbatch, newbatch.prs,
-            )
-            # insert new batch in ancestry sequence unless conflict (= no parent)
-            if self.source == 'insert':
-                for pr in newbatch.prs:
-                    if not pr.parent_id:
-                        break
-                    newchild = pr.search([
-                        ('parent_id', '=', pr.parent_id.id),
-                        ('id', '!=', pr.id),
-                    ])
-                    if newchild:
-                        newchild.parent_id = pr.id
-        else: # reached end of seq (or batch is empty)
+        newbatch = batch._port_forward()
+        if not newbatch: # reached end of seq (or batch is empty)
             # FIXME: or configuration is fucky so doesn't want to FP (maybe should error and retry?)
             _logger.info(
-                "Processing %s (from %s): %s (%s) -> end of the sequence",
-                self.id, self.source,
-                batch, batch.prs
+                "Processed %s from %s (%s) -> end of the sequence",
+                batch, self.source, batch.prs.mapped('display_name'),
             )
-            batch.active = False
+            return
+        _logger.info(
+            "Processed %s from %s (%s) -> %s (%s)",
+            batch, self.source, ', '.join(batch.prs.mapped('display_name')),
+            newbatch, ', '.join(newbatch.prs.mapped('display_name')),
+        )
+        # insert new batch in ancestry sequence
+        if self.source == 'insert':
+            self._process_insert(batch, newbatch)
+
+    def _process_insert(self, batch, newbatch):
+        self.env['runbot_merge.batch'].search([
+            ('parent_id', '=', batch.id),
+            ('id', '!=', newbatch.id),
+        ]).parent_id = newbatch.id
+        # insert new PRs in ancestry sequence unless conflict (= no parent)
+        for pr in newbatch.prs:
+            next_target = pr._find_next_target()
+            if not next_target:
+                continue
+
+            # should have one since it was inserted before another PR?
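+            # i.e. the existing forward port of the same source which already
+            # targets the next branch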
+            descendant = pr.search([
+                ('target', '=', next_target.id),
+                ('source_id', '=', pr.source_id.id),
+            ])
+
+            # copy the reviewing of the "descendant" (even if detached) to this pr
+            if reviewer := descendant.reviewed_by:
+                pr.reviewed_by = reviewer
+
+            # replace parent_id *if not detached*
+            if descendant.parent_id:
+                descendant.parent_id = pr.id
+
+    def _complete_batches(self):
+        source = pr = self.pr_id
+        if not pr:
+            _logger.warning(
+                "Unable to complete descendants of %s (%s): no new PR",
+                self.batch_id,
+                self.batch_id.prs.mapped('display_name'),
+            )
+            return
+        _logger.info(
+            "Completing batches for descendants of %s (added %s)",
+            self.batch_id.prs.mapped('display_name'),
+            self.pr_id.display_name,
+        )
+
+        gh = requests.Session()
+        repository = pr.repository
+        gh.headers['Authorization'] = f'token {repository.project_id.fp_github_token}'
+        PullRequests = self.env['runbot_merge.pull_requests']
+        self.env.cr.execute('LOCK runbot_merge_pull_requests IN SHARE MODE')
+
+        # TODO: extract complete list of targets from `_find_next_target`
+        #       so we can create all the forwardport branches, push them, and
+        #       only then create the PR objects
+        # TODO: maybe do that after making forward-port WC-less, so all the
+        #       branches can be pushed atomically at once
+        for descendant in self.batch_id.descendants():
+            target = pr._find_next_target()
+            if target is None:
+                _logger.info("Will not forward-port %s: no next target", pr.display_name)
+                return
+
+            if PullRequests.search_count([
+                ('source_id', '=', source.id),
+                ('target', '=', target.id),
+                ('state', 'not in', ('closed', 'merged')),
+            ]):
+                _logger.warning("Will not forward-port %s: already ported", pr.display_name)
+                return
+
+            if target != descendant.target:
+                self.env['runbot_merge.pull_requests.feedback'].create({
+                    'repository': repository.id,
+                    'pull_request': source.id,
+                    'token_field': 'fp_github_token',
+                    'message': """\
+{pr.ping}unable to port this PR forwards due to inconsistency: goes from \
+{pr.target.name} to {next_target.name} but {batch} ({batch_prs}) targets \
+{batch.target.name}.
+""".format(pr=pr, next_target=target, batch=descendant, batch_prs=', '.join(descendant.mapped('prs.display_name')))
+                })
+                return
+
+            ref = descendant.prs[:1].refname
+            # NOTE: ports the new source everywhere instead of porting each
+            #       PR to the next step as it does not *stop* on conflict
+            repo = git.get_local(source.repository)
+            conflict, head = source._create_fp_branch(repo, target)
+            repo.push(git.fw_url(pr.repository), f'{head}:refs/heads/{ref}')
+
+            remote_target = repository.fp_remote_target
+            owner, _ = remote_target.split('/', 1)
+            message = source.message + f"\n\nForward-Port-Of: {pr.display_name}"
+
+            title, body = re.match(r'(?P<title>[^\n]+)\n*(?P<body>.*)', message, flags=re.DOTALL).groups()
+            r = gh.post(f'https://api.github.com/repos/{pr.repository.name}/pulls', json={
+                'base': target.name,
+                'head': f'{owner}:{ref}',
+                'title': '[FW]' + (' ' if title[0] != '[' else '') + title,
+                'body': body
+            })
+            if not r.ok:
+                _logger.warning("Failed to create forward-port PR for %s, deleting branches", pr.display_name)
+                # delete all the branches, this should automatically close the
+                # PRs if we've created any.
Using the API here is probably + # simpler than going through the working copies + d = gh.delete(f'https://api.github.com/repos/{remote_target}/git/refs/heads/{ref}') + if d.ok: + _logger.info("Deleting %s:%s=success", remote_target, ref) + else: + _logger.warning("Deleting %s:%s=%s", remote_target, ref, d.text) + raise RuntimeError(f"Forwardport failure: {pr.display_name} ({r.text})") + + new_pr = PullRequests._from_gh(r.json()) + _logger.info("Created forward-port PR %s", new_pr) + new_pr.write({ + 'batch_id': descendant.id, # should already be set correctly but... + 'merge_method': pr.merge_method, + 'source_id': source.id, + # only link to previous PR of sequence if cherrypick passed + # FIXME: apply parenting of siblings? Apply parenting *to* siblings? + 'parent_id': pr.id if not conflict else False, + 'detach_reason': "{1}\n{2}".format(*conflict).strip() if conflict else None, + }) + + if conflict: + self.env.ref('runbot_merge.forwardport.failure.conflict')._send( + repository=pr.repository, + pull_request=pr.number, + token_field='fp_github_token', + format_args={'source': source, 'pr': pr, 'new': new_pr, 'footer': FOOTER}, + ) + new_pr._fp_conflict_feedback(pr, {pr: conflict}) + + labels = ['forwardport'] + if conflict: + labels.append('conflict') + self.env['runbot_merge.pull_requests.tagging'].create({ + 'repository': new_pr.repository.id, + 'pull_request': new_pr.number, + 'tags_add': labels, + }) + + pr = new_pr -CONFLICT_TEMPLATE = "{ping}WARNING: the latest change ({previous.head}) triggered " \ - "a conflict when updating the next forward-port " \ - "({next.display_name}), and has been ignored.\n\n" \ - "You will need to update this pull request differently, " \ - "or fix the issue by hand on {next.display_name}." -CHILD_CONFLICT = "{ping}WARNING: the update of {previous.display_name} to " \ - "{previous.head} has caused a conflict in this pull request, " \ - "data may have been lost." class UpdateQueue(models.Model, Queue): _name = 'forwardport.updates' _description = 'if a forward-port PR gets updated & has followups (cherrypick succeeded) the followups need to be updated as well' @@ -115,9 +268,13 @@ class UpdateQueue(models.Model, Queue): original_root = fields.Many2one('runbot_merge.pull_requests') new_root = fields.Many2one('runbot_merge.pull_requests') + def create(self, vals_list): + self.env.ref('forwardport.updates')._trigger() + return super().create(vals_list) + def _process_item(self): - Feedback = self.env['runbot_merge.pull_requests.feedback'] previous = self.new_root + sentry_sdk.set_tag("update-root", self.new_root.display_name) with ExitStack() as s: for child in self.new_root._iter_descendants(): self.env.cr.execute(""" @@ -134,45 +291,39 @@ class UpdateQueue(models.Model, Queue): self.new_root.display_name ) if child.state in ('closed', 'merged'): - Feedback.create({ - 'repository': child.repository.id, - 'pull_request': child.number, - 'message': "%sancestor PR %s has been updated but this PR" - " is %s and can't be updated to match." - "\n\n" - "You may want or need to manually update any" - " followup PR." 
% ( - child.ping(), - self.new_root.display_name, - child.state, - ) - }) + self.env.ref('runbot_merge.forwardport.updates.closed')._send( + repository=child.repository, + pull_request=child.number, + token_field='fp_github_token', + format_args={'pr': child, 'parent': self.new_root}, + ) return - conflicts, working_copy = previous._create_fp_branch( - child.target, child.refname, s) + repo = git.get_local(previous.repository) + conflicts, new_head = previous._create_fp_branch(repo, child.target) + if conflicts: _, out, err, _ = conflicts - Feedback.create({ - 'repository': previous.repository.id, - 'pull_request': previous.number, - 'message': CONFLICT_TEMPLATE.format( - ping=previous.ping(), - previous=previous, - next=child - ) - }) - Feedback.create({ - 'repository': child.repository.id, - 'pull_request': child.number, - 'message': CHILD_CONFLICT.format(ping=child.ping(), previous=previous, next=child)\ - + (f'\n\nstdout:\n```\n{out.strip()}\n```' if out.strip() else '') - + (f'\n\nstderr:\n```\n{err.strip()}\n```' if err.strip() else '') - }) + self.env.ref('runbot_merge.forwardport.updates.conflict.parent')._send( + repository=previous.repository, + pull_request=previous.number, + token_field='fp_github_token', + format_args={'pr': previous, 'next': child}, + ) + self.env.ref('runbot_merge.forwardport.updates.conflict.child')._send( + repository=child.repository, + pull_request=child.number, + token_field='fp_github_token', + format_args={ + 'previous': previous, + 'pr': child, + 'stdout': (f'\n\nstdout:\n```\n{out.strip()}\n```' if out.strip() else ''), + 'stderr': (f'\n\nstderr:\n```\n{err.strip()}\n```' if err.strip() else ''), + }, + ) - new_head = working_copy.stdout().rev_parse(child.refname).stdout.decode().strip() - commits_count = int(working_copy.stdout().rev_list( - f'{child.target.name}..{child.refname}', + commits_count = int(repo.stdout().rev_list( + f'{child.target.name}..{new_head}', count=True ).stdout.decode().strip()) old_head = child.head @@ -182,16 +333,11 @@ class UpdateQueue(models.Model, Queue): # 'state': 'opened', 'squash': commits_count == 1, }) - # push the new head to the local cache: in some cases github - # doesn't propagate revisions fast enough so on the next loop we - # can't find the revision we just pushed - dummy_branch = str(uuid.uuid4()) - ref = previous._get_local_directory() - working_copy.push(ref._directory, f'{new_head}:refs/heads/{dummy_branch}') - ref.branch('--delete', '--force', dummy_branch) # then update the child's branch to the new head - working_copy.push(f'--force-with-lease={child.refname}:{old_head}', - 'target', child.refname) + repo.push( + f'--force-with-lease={child.refname}:{old_head}', + git.fw_url(child.repository), + f"{new_head}:refs/heads/{child.refname}") # committing here means github could technically trigger its # webhook before sending a response, but committing before @@ -211,8 +357,12 @@ class DeleteBranches(models.Model, Queue): pr_id = fields.Many2one('runbot_merge.pull_requests') + def create(self, vals_list): + self.env.ref('forwardport.remover')._trigger(datetime.now() - MERGE_AGE) + return super().create(vals_list) + def _search_domain(self): - cutoff = self.env.context.get('forwardport_merged_before') \ + cutoff = getattr(builtins, 'forwardport_merged_before', None) \ or fields.Datetime.to_string(datetime.now() - MERGE_AGE) return [('pr_id.merge_date', '<', cutoff)] @@ -270,46 +420,3 @@ class DeleteBranches(models.Model, Queue): r.json() ) _deleter.info('✔ deleted branch %s of PR %s', self.pr_id.label, 
self.pr_id.display_name) - -_gc = _logger.getChild('maintenance') -def _bypass_limits(): - """Allow git to go beyond the limits set for Odoo. - - On large repositories, git gc can take a *lot* of memory (especially with - `--aggressive`), if the Odoo limits are too low this can prevent the gc - from running, leading to a lack of packing and a massive amount of cruft - accumulating in the working copy. - """ - resource.setrlimit(resource.RLIMIT_AS, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)) - -class GC(models.TransientModel): - _name = 'forwardport.maintenance' - _description = "Weekly maintenance of... cache repos?" - - def _run(self): - # lock out the forward port cron to avoid concurrency issues while we're - # GC-ing it: wait until it's available, then SELECT FOR UPDATE it, - # which should prevent cron workers from running it - fp_cron = self.env.ref('forwardport.port_forward') - self.env.cr.execute(""" - SELECT 1 FROM ir_cron - WHERE id = %s - FOR UPDATE - """, [fp_cron.id]) - - repos_dir = pathlib.Path(user_cache_dir('forwardport')) - # run on all repos with a forwardport target (~ forwardport enabled) - for repo in self.env['runbot_merge.repository'].search([('fp_remote_target', '!=', False)]): - repo_dir = repos_dir / repo.name - if not repo_dir.is_dir(): - continue - - _gc.info('Running maintenance on %s', repo.name) - r = subprocess.run( - ['git', '--git-dir', repo_dir, 'gc', '--aggressive', '--prune=now'], - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - encoding='utf-8', - preexec_fn = _bypass_limits, - ) - if r.returncode: - _gc.warning("Maintenance failure (status=%d):\n%s", r.returncode, r.stdout) diff --git a/forwardport/models/project.py b/forwardport/models/project.py index 331b7c3f..41e03abf 100644 --- a/forwardport/models/project.py +++ b/forwardport/models/project.py @@ -11,36 +11,28 @@ means PR creation is trickier (as mergebot assumes opened event will always lead to PR creation but fpbot wants to attach meaning to the PR when setting it up), ... 
""" -import ast -import base64 -import collections -import contextlib +from __future__ import annotations + +import builtins import datetime import itertools import json import logging import operator -import os -import pathlib -import re import subprocess -import tempfile import typing import dateutil.relativedelta import requests -import resource -from odoo import _, models, fields, api -from odoo.osv import expression +from odoo import models, fields, api from odoo.exceptions import UserError +from odoo.osv import expression from odoo.tools.misc import topological_sort, groupby -from odoo.tools.sql import reverse_order -from odoo.tools.appdirs import user_cache_dir -from odoo.addons.runbot_merge import utils -from odoo.addons.runbot_merge.models.pull_requests import RPLUS - -footer = '\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n' +from odoo.addons.base.models.res_partner import Partner +from odoo.addons.runbot_merge import git, utils +from odoo.addons.runbot_merge.models.pull_requests import Branch +from odoo.addons.runbot_merge.models.stagings_create import Message DEFAULT_DELTA = dateutil.relativedelta.relativedelta(days=3) @@ -49,45 +41,8 @@ _logger = logging.getLogger('odoo.addons.forwardport') class Project(models.Model): _inherit = 'runbot_merge.project' - fp_github_token = fields.Char() - fp_github_name = fields.Char(store=True, compute="_compute_git_identity") - fp_github_email = fields.Char(store=True, compute="_compute_git_identity") - - def _find_commands(self, comment): - if self.env.context.get('without_forward_port'): - return super()._find_commands(comment) - - return re.findall( - r'^\s*[@|#]?{}:? (.*)$'.format(self.fp_github_name), - comment, re.MULTILINE | re.IGNORECASE - ) + super()._find_commands(comment) - - # technically the email could change at any moment... 
- @api.depends('fp_github_token') - def _compute_git_identity(self): - s = requests.Session() - for project in self: - if not project.fp_github_token: - continue - r0 = s.get('https://api.github.com/user', headers={ - 'Authorization': 'token %s' % project.fp_github_token - }) - if 'user:email' not in set(re.split(r',\s*', r0.headers['x-oauth-scopes'])): - raise UserError(_("The forward-port github token needs the user:email scope to fetch the bot's identity.")) - r1 = s.get('https://api.github.com/user/emails', headers={ - 'Authorization': 'token %s' % project.fp_github_token - }) - if not (r0.ok and r1.ok): - _logger.error("Failed to fetch bot information for project %s: %s", project.name, (r0.text or r0.content) if not r0.ok else (r1.text or r1.content)) - continue - project.fp_github_name = r0.json()['login'] - project.fp_github_email = next(( - entry['email'] - for entry in r1.json() - if entry['primary'] - ), None) - if not project.fp_github_email: - raise UserError(_("The forward-port bot needs a primary email set up.")) + id: int + github_prefix: str def write(self, vals): # check on branches both active and inactive so disabling branches doesn't @@ -106,34 +61,42 @@ class Project(models.Model): because no CI or CI failed), create followup, as if the branch had been originally disabled (and thus skipped over) """ - PRs = self.env['runbot_merge.pull_requests'] + Batch = self.env['runbot_merge.batch'] + ported = self.env['runbot_merge.pull_requests'] for p in self: actives = previously_active_branches[p] for deactivated in p.branch_ids.filtered(lambda b: not b.active) & actives: - # if a PR targets a deactivated branch, and that's not its limit, - # and it doesn't have a child (e.g. CI failed), enqueue a forward - # port as if the now deactivated branch had been skipped over (which - # is the normal fw behaviour) - extant = PRs.search([ + # if a non-merged batch targets a deactivated branch which is + # not its limit + extant = Batch.search([ + ('parent_id', '!=', False), ('target', '=', deactivated.id), - ('source_id.limit_id', '!=', deactivated.id), - ('state', 'not in', ('closed', 'merged')), - ]) - for p in extant.with_context(force_fw=True): - next_target = p.source_id._find_next_target(p) - # should not happen since we already filtered out limits - if not next_target: - continue + # if at least one of the PRs has a different limit + ('prs.limit_id', '!=', deactivated.id), + ('merge_date', '=', False), + ]).filtered(lambda b:\ + # and has a next target (should already be a function of + # the search but doesn't hurt) + b._find_next_target() \ + # and has not already been forward ported + and Batch.search_count([('parent_id', '=', b.id)]) == 0 + ) - # check if it has a descendant in the next branch, if so skip - if PRs.search_count([ - ('source_id', '=', p.source_id.id), - ('target', '=', next_target.id) - ]): - continue + # PRs may have different limits in the same batch so only notify + # those which actually needed porting + ported |= extant._schedule_fp_followup(force_fw=True)\ + .prs.filtered(lambda p: p._find_next_target()) - # otherwise enqueue a followup - p._schedule_fp_followup() + if not ported: + return + + for feedback in self.env['runbot_merge.pull_requests.feedback'].search(expression.OR( + [('repository', '=', p.repository.id), ('pull_request', '=', p.number)] + for p in ported + )): + # FIXME: better signal + if 'disabled' in feedback.message: + feedback.message += '\n\nAs this was not its limit, it will automatically be forward ported to the next active branch.' 
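+    # note: `_insert_intermediate_prs` below only enqueues `source='insert'`
+    # jobs; the actual splicing into the ancestry chain is done by
+    # `ForwardPortTasks._process_insert` in models/forwardport.py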
def _insert_intermediate_prs(self, branches_before): """If new branches have been added to the sequence inbetween existing @@ -191,91 +154,70 @@ class Project(models.Model): # the parents linked list, so it has a special type for _, cs in groupby(candidates, key=lambda p: p.label): self.env['forwardport.batches'].create({ - 'batch_id': self.env['runbot_merge.batch'].create({ - 'target': before[-1].id, - 'prs': [(4, c.id, 0) for c in cs], - 'active': False, - }).id, + 'batch_id': cs[0].batch_id.id, 'source': 'insert', }) - def _forward_port_ordered(self, domain=()): - Branches = self.env['runbot_merge.branch'] - return Branches.search(expression.AND([ - [('project_id', '=', self.id)], - domain or [], - ]), order=reverse_order(Branches._order)) - class Repository(models.Model): _inherit = 'runbot_merge.repository' + + id: int + project_id: Project + name: str + branch_filter: str fp_remote_target = fields.Char(help="where FP branches get pushed") -class Branch(models.Model): - _inherit = 'runbot_merge.branch' - - fp_target = fields.Boolean(default=True) - fp_enabled = fields.Boolean(compute='_compute_fp_enabled') - - @api.depends('active', 'fp_target') - def _compute_fp_enabled(self): - for b in self: - b.fp_enabled = b.active and b.fp_target - class PullRequests(models.Model): _inherit = 'runbot_merge.pull_requests' - limit_id = fields.Many2one('runbot_merge.branch', help="Up to which branch should this PR be forward-ported") + id: int + display_name: str + number: int + repository: Repository + target: Branch + reviewed_by: Partner + head: str + state: str + merge_date: datetime.datetime + parent_id: PullRequests - parent_id = fields.Many2one( - 'runbot_merge.pull_requests', index=True, - help="a PR with a parent is an automatic forward port" - ) - source_id = fields.Many2one('runbot_merge.pull_requests', index=True, help="the original source of this FP even if parents were detached along the way") - forwardport_ids = fields.One2many('runbot_merge.pull_requests', 'source_id') reminder_backoff_factor = fields.Integer(default=-4, group_operator=None) - merge_date = fields.Datetime() - detach_reason = fields.Char() + @api.model_create_multi + def create(self, vals_list): + created = [] + to_create = [] + old = self.browse(()) + for vals in vals_list: + # PR opened event always creates a new PR, override so we can precreate PRs + existing = self.search([ + ('repository', '=', vals['repository']), + ('number', '=', vals['number']), + ]) + created.append(not existing) + if existing: + old |= existing + continue - fw_policy = fields.Selection([ - ('ci', "Normal"), - ('skipci', "Skip CI"), - # ('skipmerge', "Skip merge"), - ], required=True, default="ci") + to_create.append(vals) + if vals.get('parent_id') and 'source_id' not in vals: + vals['source_id'] = self.browse(vals['parent_id']).root_id.id + new = super().create(to_create) - _sql_constraints = [( - 'fw_constraint', - 'check(source_id is null or num_nonnulls(parent_id, detach_reason) = 1)', - "fw PRs must either be attached or have a reason for being detached", - )] + for pr in new: + # added a new PR to an already forward-ported batch: port the PR + if self.env['runbot_merge.batch'].search_count([ + ('parent_id', '=', pr.batch_id.id), + ]): + self.env['forwardport.batches'].create({ + 'batch_id': pr.batch_id.id, + 'source': 'complete', + 'pr_id': pr.id, + }) - refname = fields.Char(compute='_compute_refname') - @api.depends('label') - def _compute_refname(self): - for pr in self: - pr.refname = pr.label.split(':', 1)[-1] - - 
@api.model_create_single - def create(self, vals): - # PR opened event always creates a new PR, override so we can precreate PRs - existing = self.search([ - ('repository', '=', vals['repository']), - ('number', '=', vals['number']), - ]) - if existing: - return existing - - if 'limit_id' not in vals: - branch = self.env['runbot_merge.branch'].browse(vals['target']) - repo = self.env['runbot_merge.repository'].browse(vals['repository']) - vals['limit_id'] = branch.project_id._forward_port_ordered( - ast.literal_eval(repo.branch_filter or '[]') - )[-1].id - if vals.get('parent_id') and 'source_id' not in vals: - vals['source_id'] = self.browse(vals['parent_id'])._get_root().id - if vals.get('state') == 'merged': - vals['merge_date'] = fields.Datetime.now() - return super().create(vals) + new = iter(new) + old = iter(old) + return self.browse(next(new).id if c else next(old).id for c in created) def write(self, vals): # if the PR's head is updated, detach (should split off the FP lines as this is not the original code) @@ -284,7 +226,12 @@ class PullRequests(models.Model): # also a bit odd to only handle updating 1 head at a time, but then # again 2 PRs with same head is weird so... newhead = vals.get('head') - with_parents = self.filtered('parent_id') + with_parents = { + p: p.parent_id + for p in self + if p.state not in ('merged', 'closed') + if p.parent_id + } closed_fp = self.filtered(lambda p: p.state == 'closed' and p.source_id) if newhead and not self.env.context.get('ignore_head_update') and newhead != self.head: vals.setdefault('parent_id', False) @@ -294,264 +241,32 @@ class PullRequests(models.Model): # updating children if self.search_count([('parent_id', '=', self.id)]): self.env['forwardport.updates'].create({ - 'original_root': self._get_root().id, + 'original_root': self.root_id.id, 'new_root': self.id }) if vals.get('parent_id') and 'source_id' not in vals: - vals['source_id'] = self.browse(vals['parent_id'])._get_root().id - if vals.get('state') == 'merged': - vals['merge_date'] = fields.Datetime.now() + parent = self.browse(vals['parent_id']) + vals['source_id'] = (parent.source_id or parent).id r = super().write(vals) if self.env.context.get('forwardport_detach_warn', True): - for p in with_parents: - if not p.parent_id: - self.env['runbot_merge.pull_requests.feedback'].create({ - 'repository': p.repository.id, - 'pull_request': p.number, - 'message': "%sthis PR was modified / updated and has become a normal PR. " - "It should be merged the normal way (via @%s)" % ( - p.source_id.ping(), - p.repository.project_id.github_prefix, - ), - 'token_field': 'fp_github_token', - }) - for p in closed_fp.filtered(lambda p: p.state != 'closed'): - self.env['runbot_merge.pull_requests.feedback'].create({ - 'repository': p.repository.id, - 'pull_request': p.number, - 'message': "%sthis PR was closed then reopened. 
" - "It should be merged the normal way (via @%s)" % ( - p.source_id.ping(), - p.repository.project_id.github_prefix, - ), - 'token_field': 'fp_github_token', - }) - if vals.get('state') == 'merged': - for p in self: - self.env['forwardport.branch_remover'].create({ - 'pr_id': p.id, - }) - # if we change the policy to skip CI, schedule followups on existing FPs - if vals.get('fw_policy') == 'skipci' and self.state == 'merged': - self.env['runbot_merge.pull_requests'].search([ - ('source_id', '=', self.id), - ('state', 'not in', ('closed', 'merged')), - ])._schedule_fp_followup() - return r - - def _try_closing(self, by): - r = super()._try_closing(by) - if r: - self.with_context(forwardport_detach_warn=False).write({ - 'parent_id': False, - 'detach_reason': f"Closed by {by}", - }) - self.search([('parent_id', '=', self.id)]).write({ - 'parent_id': False, - 'detach_reason': f"{by} closed parent PR {self.display_name}", - }) - return r - - def _parse_commands(self, author, comment, login): - super(PullRequests, self.with_context(without_forward_port=True))._parse_commands(author, comment, login) - - tokens = [ - token - for line in re.findall(r'^\s*[@|#]?{}:? (.*)$'.format(self.repository.project_id.fp_github_name), comment['body'] or '', re.MULTILINE | re.IGNORECASE) - for token in line.split() - ] - if not tokens: - _logger.info("found no commands in comment of %s (%s) (%s)", author.github_login, author.display_name, - utils.shorten(comment['body'] or '', 50) - ) - return - - # TODO: don't use a mutable tokens iterator - tokens = iter(tokens) - while True: - token = next(tokens, None) - if token is None: - break - - ping = False - close = False - msg = None - if token in ('ci', 'skipci'): - pr = (self.source_id or self) - if pr._pr_acl(author).is_reviewer: - pr.fw_policy = token - msg = "Not waiting for CI to create followup forward-ports." if token == 'skipci' else "Waiting for CI to create followup forward-ports." - else: - ping = True - msg = "you can't configure ci." - - if token == 'ignore': # replace 'ignore' by 'up to <pr_branch>' - token = 'up' - tokens = itertools.chain(['to', self.target.name], tokens) - - if token in ('r+', 'review+'): - if not self.source_id: - ping = True - msg = "I can only do this on forward-port PRs and this is not one, see {}.".format( - self.repository.project_id.github_prefix + for p, parent in with_parents.items(): + if p.parent_id: + continue + self.env.ref('runbot_merge.forwardport.update.detached')._send( + repository=p.repository, + pull_request=p.number, + token_field='fp_github_token', + format_args={'pr': p}, + ) + if parent.state not in ('closed', 'merged'): + self.env.ref('runbot_merge.forwardport.update.parent')._send( + repository=parent.repository, + pull_request=parent.number, + token_field='fp_github_token', + format_args={'pr': parent, 'child': p}, ) - elif not self.parent_id: - ping = True - msg = "I can only do this on unmodified forward-port PRs, ask {}.".format( - self.repository.project_id.github_prefix - ) - else: - merge_bot = self.repository.project_id.github_prefix - # don't update the root ever - for pr in (p for p in self._iter_ancestors() if p.parent_id if p.state in RPLUS): - # only the author is delegated explicitely on the - pr._parse_commands(author, {**comment, 'body': merge_bot + ' r+'}, login) - elif token == 'close': - if self.source_id._pr_acl(author).is_reviewer: - close = True - else: - ping = True - msg = "you can't close PRs." 
- - elif token == 'up' and next(tokens, None) == 'to': - limit = next(tokens, None) - ping = True - if not self._pr_acl(author).is_author: - msg = "you can't set a forward-port limit.".format(login) - elif not limit: - msg = "please provide a branch to forward-port to." - else: - limit_id = self.env['runbot_merge.branch'].with_context(active_test=False).search([ - ('project_id', '=', self.repository.project_id.id), - ('name', '=', limit), - ]) - if self.source_id: - msg = "forward-port limit can only be set on " \ - f"an origin PR ({self.source_id.display_name} " \ - "here) before it's merged and forward-ported." - elif self.state in ['merged', 'closed']: - msg = "forward-port limit can only be set before the PR is merged." - elif not limit_id: - msg = "there is no branch %r, it can't be used as a forward port target." % limit - elif limit_id == self.target: - ping = False - msg = "Forward-port disabled." - self.limit_id = limit_id - elif not limit_id.fp_enabled: - msg = "branch %r is disabled, it can't be used as a forward port target." % limit_id.name - else: - ping = False - msg = "Forward-porting to %r." % limit_id.name - self.limit_id = limit_id - - if msg or close: - if msg: - _logger.info("%s [%s]: %s", self.display_name, login, msg) - else: - _logger.info("%s [%s]: closing", self.display_name, login) - self.env['runbot_merge.pull_requests.feedback'].create({ - 'repository': self.repository.id, - 'pull_request': self.number, - 'message': f'@{author.github_login} {msg}' if msg and ping else msg, - 'close': close, - 'token_field': 'fp_github_token', - }) - - def _notify_ci_failed(self, ci): - # only care about FP PRs which are not staged / merged yet - # NB: probably ignore approved PRs as normal message will handle them? - if not (self.state == 'opened' and self.parent_id): - return - - self.env['runbot_merge.pull_requests.feedback'].create({ - 'repository': self.repository.id, - 'pull_request': self.number, - 'token_field': 'fp_github_token', - 'message': '%s%s failed on this forward-port PR' % ( - self.source_id.ping(), - ci, - ) - }) - - def _validate(self, statuses): - failed = super()._validate(statuses) - self._schedule_fp_followup() - return failed - - def _schedule_fp_followup(self): - _logger = logging.getLogger(__name__).getChild('forwardport.next') - # if the PR has a parent and is CI-validated, enqueue the next PR - for pr in self: - _logger.info('Checking if forward-port %s (%s)', pr.display_name, pr) - if not pr.parent_id: - _logger.info('-> no parent %s (%s)', pr.display_name, pr.parent_id) - continue - if not self.env.context.get('force_fw') and self.source_id.fw_policy != 'skipci' and pr.state not in ['validated', 'ready']: - _logger.info('-> wrong state %s (%s)', pr.display_name, pr.state) - continue - - # check if we've already forward-ported this branch: - # it has a batch without a staging - batch = self.env['runbot_merge.batch'].with_context(active_test=False).search([ - ('staging_id', '=', False), - ('prs', 'in', pr.id), - ], limit=1) - # if the batch is inactive, the forward-port has been done *or* - # the PR's own forward port is in error, so bail - if not batch.active: - _logger.info('-> forward port done or in error (%s.active=%s)', batch, batch.active) - continue - - # otherwise check if we already have a pending forward port - _logger.info("%s %s %s", pr.display_name, batch, ', '.join(batch.mapped('prs.display_name'))) - if self.env['forwardport.batches'].search_count([('batch_id', '=', batch.id)]): - _logger.warning('-> already recorded') - continue - - 
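The gate applied just below in `_schedule_fp_followup` — a follow-up forward port is only enqueued once every PR sharing the batch is validated or ready, unless its source skips CI — reduces to a single `all()`. A hedged sketch with a stand-in PR type (`fw_policy` here abbreviates `source_id.fw_policy` and is not the model's actual field layout):

```python
from dataclasses import dataclass

@dataclass
class PR:
    state: str
    fw_policy: str = 'ci'  # stand-in for source_id.fw_policy

def batch_ready(prs, force=False):
    # skipci sources don't wait for CI; everyone else must be
    # validated or ready before the follow-up port is enqueued
    return force or all(
        p.fw_policy == 'skipci' or p.state in ('validated', 'ready')
        for p in prs
    )

assert batch_ready([PR('validated'), PR('ready')])
assert not batch_ready([PR('validated'), PR('opened')])
assert batch_ready([PR('opened', fw_policy='skipci')])
```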
# check if batch-mate are all valid - mates = batch.prs - # wait until all of them are validated or ready - if not self.env.context.get('force_fw') and any(pr.source_id.fw_policy != 'skipci' and pr.state not in ('validated', 'ready') for pr in mates): - _logger.info("-> not ready (%s)", [(pr.display_name, pr.state) for pr in mates]) - continue - - # check that there's no weird-ass state - if not all(pr.parent_id for pr in mates): - _logger.warning("Found a batch (%s) with only some PRs having parents, ignoring", mates) - continue - if self.search_count([('parent_id', 'in', mates.ids)]): - _logger.warning("Found a batch (%s) with only some of the PRs having children", mates) - continue - - _logger.info('-> ok') - self.env['forwardport.batches'].create({ - 'batch_id': batch.id, - 'source': 'fp', - }) - - def _find_next_target(self, reference): - """ Finds the branch between target and limit_id which follows - reference - """ - if reference.target == self.limit_id: - return - # NOTE: assumes even disabled branches are properly sequenced, would - # probably be a good idea to have the FP view show all branches - branches = list(self.target.project_id - .with_context(active_test=False) - ._forward_port_ordered(ast.literal_eval(self.repository.branch_filter or '[]'))) - - # get all branches between max(root.target, ref.target) (excluded) and limit (included) - from_ = max(branches.index(self.target), branches.index(reference.target)) - to_ = branches.index(self.limit_id) - - # return the first active branch in the set - return next(( - branch - for branch in branches[from_+1:to_+1] - if branch.fp_enabled - ), None) + return r def _commits_lazy(self): s = requests.Session() @@ -581,538 +296,197 @@ class PullRequests(models.Model): } return sorted(commits, key=lambda c: idx[c['sha']]) - def _iter_descendants(self): - pr = self - while True: - pr = self.search([('parent_id', '=', pr.id)]) - if pr: - yield pr - else: - break - @api.depends('parent_id.statuses') - def _compute_statuses(self): - super()._compute_statuses() - - def _get_overrides(self): - # NB: assumes _get_overrides always returns an "owned" dict which we can modify - p = self.parent_id._get_overrides() if self.parent_id else {} - p.update(super()._get_overrides()) - return p - - def _iter_ancestors(self): - while self: - yield self - self = self.parent_id - - def _get_root(self): - root = self - while root.parent_id: - root = root.parent_id - return root - - def _port_forward(self): - if not self: - return - - all_sources = [(p.source_id or p) for p in self] - all_targets = [s._find_next_target(p) for s, p in zip(all_sources, self)] - - ref = self[0] - base = all_sources[0] - target = all_targets[0] - if target is None: - _logger.info( - "Will not forward-port %s: no next target", - ref.display_name, - ) - return # QUESTION: do the prs need to be updated? 
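For reference, the branch-walking rule that the removed `_find_next_target` applies, and which the early return in `_port_forward` above relies on, can be sketched over plain branch names. In this hedged sketch `enabled` stands in for the branches' `fp_enabled` flag and every name is illustrative:

```python
def find_next_target(branches, enabled, current, reference, limit):
    """branches is ordered oldest to newest, e.g. ['a', 'b', 'c'];
    sketch over plain branch names, not the model API."""
    if reference == limit:
        return None
    # start from whichever of the PR's own target and the reference
    # target is newest, scan up to and including the limit
    from_ = max(branches.index(current), branches.index(reference))
    to_ = branches.index(limit)
    # first branch in that window still enabled for forward-porting
    return next((b for b in branches[from_ + 1:to_ + 1] if b in enabled), None)

# 'b' is disabled, so porting a PR targeting 'a' skips straight to 'c'
assert find_next_target(['a', 'b', 'c'], {'a', 'c'}, 'a', 'a', 'c') == 'c'
```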
- - # check if the PRs have already been forward-ported: is there a PR - # with the same source targeting the next branch in the series - for source in all_sources: - if self.search_count([('source_id', '=', source.id), ('target', '=', target.id)]): - _logger.info("Will not forward-port %s: already ported", ref.display_name) - return - - # check if all PRs in the batch have the same "next target" , bail if - # that's not the case as it doesn't make sense for forward one PR from - # a to b and a linked pr from a to c - different_target = next((t for t in all_targets if t != target), None) - if different_target: - different_pr = next(p for p, t in zip(self, all_targets) if t == different_target) - for pr, t in zip(self, all_targets): - linked, other = different_pr, different_target - if t != target: - linked, other = ref, target - self.env['runbot_merge.pull_requests.feedback'].create({ - 'repository': pr.repository.id, - 'pull_request': pr.number, - 'token_field': 'fp_github_token', - 'message': "%sthis pull request can not be forward ported: " - "next branch is %r but linked pull request %s " - "has a next branch %r." % ( - pr.ping(), t.name, linked.display_name, other.name - ) - }) - _logger.warning( - "Cancelling forward-port of %s: found different next branches (%s)", - self, all_targets - ) - return - - proj = self.mapped('target.project_id') - if not proj.fp_github_token: - _logger.warning( - "Can not forward-port %s: no token on project %s", - ref.display_name, - proj.name - ) - return - - notarget = [p.repository.name for p in self if not p.repository.fp_remote_target] - if notarget: - _logger.error( - "Can not forward-port %s: repos %s don't have a remote configured", - self, ', '.join(notarget) - ) - return - - # take only the branch bit - new_branch = '%s-%s-%s-fw' % ( - target.name, - base.refname, - # avoid collisions between fp branches (labels can be reused - # or conflict especially as we're chopping off the owner) - base64.urlsafe_b64encode(os.urandom(3)).decode() - ) - # TODO: send outputs to logging? - conflicts = {} - with contextlib.ExitStack() as s: - for pr in self: - conflicts[pr], working_copy = pr._create_fp_branch( - target, new_branch, s) - - working_copy.push('target', new_branch) - - gh = requests.Session() - gh.headers['Authorization'] = 'token %s' % proj.fp_github_token - has_conflicts = any(conflicts.values()) - # problemo: this should forward port a batch at a time, if porting - # one of the PRs in the batch fails is huge problem, though this loop - # only concerns itself with the creation of the followup objects so... - new_batch = self.browse(()) - for pr in self: - owner, _ = pr.repository.fp_remote_target.split('/', 1) - source = pr.source_id or pr - root = pr._get_root() - - message = source.message + '\n\n' + '\n'.join( - "Forward-Port-Of: %s" % p.display_name - for p in root | source - ) - - title, body = re.match(r'(?P<title>[^\n]+)\n*(?P<body>.*)', message, flags=re.DOTALL).groups() - self.env.cr.execute('LOCK runbot_merge_pull_requests IN SHARE MODE') - r = gh.post(f'https://api.github.com/repos/{pr.repository.name}/pulls', json={ - 'base': target.name, - 'head': f'{owner}:{new_branch}', - 'title': '[FW]' + (' ' if title[0] != '[' else '') + title, - 'body': body - }) - if not r.ok: - _logger.warning("Failed to create forward-port PR for %s, deleting branches", pr.display_name) - # delete all the branches this should automatically close the - # PRs if we've created any. 
Using the API here is probably - # simpler than going through the working copies - for repo in self.mapped('repository'): - d = gh.delete(f'https://api.github.com/repos/{repo.fp_remote_target}/git/refs/heads/{new_branch}') - if d.ok: - _logger.info("Deleting %s:%s=success", repo.fp_remote_target, new_branch) - else: - _logger.warning("Deleting %s:%s=%s", repo.fp_remote_target, new_branch, d.text) - raise RuntimeError("Forwardport failure: %s (%s)" % (pr.display_name, r.text)) - - new_pr = self._from_gh(r.json()) - _logger.info("Created forward-port PR %s", new_pr) - new_batch |= new_pr - - # allows PR author to close or skipci - source.delegates |= source.author - new_pr.write({ - 'merge_method': pr.merge_method, - 'source_id': source.id, - # only link to previous PR of sequence if cherrypick passed - 'parent_id': pr.id if not has_conflicts else False, - 'detach_reason': "conflicts: {}".format( - f'\n{conflicts[pr]}\n{conflicts[pr]}'.strip() - ) if has_conflicts else None, - # Copy author & delegates of source as well as delegates of - # previous so they can r+ the new forward ports. - 'delegates': [(6, False, (source.delegates | pr.delegates).ids)] - }) - if has_conflicts and pr.parent_id and pr.state not in ('merged', 'closed'): - message = source.ping() + """\ -the next pull request (%s) is in conflict. You can merge the chain up to here by saying -> @%s r+ -%s""" % (new_pr.display_name, pr.repository.project_id.fp_github_name, footer) - self.env['runbot_merge.pull_requests.feedback'].create({ - 'repository': pr.repository.id, - 'pull_request': pr.number, - 'message': message, - 'token_field': 'fp_github_token', - }) - # not great but we probably want to avoid the risk of the webhook - # creating the PR from under us. There's still a "hole" between - # the POST being executed on gh and the commit but... - self.env.cr.commit() - - for pr, new_pr in zip(self, new_batch): - source = pr.source_id or pr - (h, out, err, hh) = conflicts.get(pr) or (None, None, None, None) - - if h: - sout = serr = '' - if out.strip(): - sout = f"\nstdout:\n```\n{out}\n```\n" - if err.strip(): - serr = f"\nstderr:\n```\n{err}\n```\n" - - lines = '' - if len(hh) > 1: - lines = '\n' + ''.join( - '* %s%s\n' % (sha, ' <- on this commit' if sha == h else '') - for sha in hh - ) - message = f"""{source.ping()}cherrypicking of pull request {source.display_name} failed. -{lines}{sout}{serr} -Either perform the forward-port manually (and push to this branch, proceeding as usual) or close this PR (maybe?). - -In the former case, you may want to edit this PR message as well. -""" - elif has_conflicts: - message = """%s\ -while this was properly forward-ported, at least one co-dependent PR (%s) did \ -not succeed. You will need to fix it before this can be merged. - -Both this PR and the others will need to be approved via `@%s r+` as they are \ -all considered "in conflict". -%s""" % ( - source.ping(), - ', '.join(p.display_name for p in (new_batch - new_pr)), - proj.github_prefix, - footer - ) - elif base._find_next_target(new_pr) is None: - ancestors = "".join( - "* %s\n" % p.display_name - for p in pr._iter_ancestors() - if p.parent_id - ) - message = source.ping() + """\ -this PR targets %s and is the last of the forward-port chain%s -%s -To merge the full chain, say -> @%s r+ -%s""" % (target.name, ' containing:' if ancestors else '.', ancestors, pr.repository.project_id.fp_github_name, footer) - else: - message = """\ -This PR targets %s and is part of the forward-port chain. Further PRs will be created up to %s. 
-%s""" % (target.name, base.limit_id.name, footer) - self.env['runbot_merge.pull_requests.feedback'].create({ - 'repository': new_pr.repository.id, - 'pull_request': new_pr.number, - 'message': message, - 'token_field': 'fp_github_token', - }) - labels = ['forwardport'] - if has_conflicts: - labels.append('conflict') - self.env['runbot_merge.pull_requests.tagging'].create({ - 'repository': new_pr.repository.id, - 'pull_request': new_pr.number, - 'tags_add': labels, - }) - - # batch the PRs so _validate can perform the followup FP properly - # (with the entire batch). If there are conflict then create a - # deactivated batch so the interface is coherent but we don't pickup - # an active batch we're never going to deactivate. - b = self.env['runbot_merge.batch'].create({ - 'target': target.id, - 'prs': [(6, 0, new_batch.ids)], - 'active': not has_conflicts, - }) - # if we're not waiting for CI, schedule followup immediately - if any(p.source_id.fw_policy == 'skipci' for p in b.prs): - b.prs[0]._schedule_fp_followup() - return b - - @property - def _source_url(self): - return 'https://{}:{}@github.com/{}'.format( - self.repository.project_id.fp_github_name or '', - self.repository.project_id.fp_github_token, - self.repository.name, - ) - - def _create_fp_branch(self, target_branch, fp_branch_name, cleanup): + def _create_fp_branch(self, source, target_branch): """ Creates a forward-port for the current PR to ``target_branch`` under ``fp_branch_name``. :param target_branch: the branch to port forward to - :param fp_branch_name: the name of the branch to create the FP under - :param ExitStack cleanup: so the working directories can be cleaned up - :return: A pair of an optional conflict information and a repository. If - present the conflict information is composed of the hash of the - conflicting commit, the stderr and stdout of the failed - cherrypick and a list of all PR commit hashes :rtype: (None | (str, str, str, list[commit]), Repo) """ logger = _logger.getChild(str(self.id)) - root = self._get_root() + root = self.root_id logger.info( "Forward-porting %s (%s) to %s", self.display_name, root.display_name, target_branch.name ) - source = self._get_local_directory() - r = source.with_config(stdout=subprocess.PIPE, stderr=subprocess.STDOUT).fetch() - logger.info("Updated cache repo %s:\n%s", source._directory, r.stdout.decode()) + fetch = source.with_config(stdout=subprocess.PIPE, stderr=subprocess.STDOUT).fetch() + logger.info("Updated cache repo %s:\n%s", source._directory, fetch.stdout.decode()) - logger.info("Create working copy...") - working_copy = source.clone( - cleanup.enter_context( - tempfile.TemporaryDirectory( - prefix='%s-to-%s-' % ( - root.display_name, - target_branch.name - ), - dir=user_cache_dir('forwardport') - )), - branch=target_branch.name - ) - - r = working_copy.with_config(stdout=subprocess.PIPE, stderr=subprocess.STDOUT) \ - .fetch(self._source_url, root.head) - logger.info( - "Fetched head of %s into %s:\n%s", - root.display_name, - working_copy._directory, - r.stdout.decode() - ) - if working_copy.check(False).cat_file(e=root.head).returncode: + head_fetch = source.with_config(stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=False) \ + .fetch(git.source_url(self.repository), root.head) + if head_fetch.returncode: raise ForwardPortError( f"During forward port of {self.display_name}, unable to find " f"expected head of {root.display_name} ({root.head})" ) - project_id = self.repository.project_id - # add target remote - working_copy.remote( - 'add', 
'target', - 'https://{p.fp_github_name}:{p.fp_github_token}@github.com/{r.fp_remote_target}'.format( - r=self.repository, - p=project_id - ) + logger.info( + "Fetched head of %s (%s):\n%s", + root.display_name, + root.head, + head_fetch.stdout.decode() ) - logger.info("Create FP branch %s in %s", fp_branch_name, working_copy._directory) - working_copy.checkout(b=fp_branch_name) try: - root._cherry_pick(working_copy) - return None, working_copy + return None, root._cherry_pick(source, target_branch.name) except CherrypickError as e: h, out, err, commits = e.args - # using git diff | git apply -3 to get the entire conflict set - # turns out to not work correctly: in case files have been moved - # / removed (which turns out to be a common source of conflicts - # when forward-porting) it'll just do nothing to the working copy - # so the "conflict commit" will be empty - # switch to a squashed-pr branch - working_copy.check(True).checkout('-bsquashed', root.head) # commits returns oldest first, so youngest (head) last head_commit = commits[-1]['commit'] to_tuple = operator.itemgetter('name', 'email') - to_dict = lambda term, vals: { - 'GIT_%s_NAME' % term: vals[0], - 'GIT_%s_EMAIL' % term: vals[1], - 'GIT_%s_DATE' % term: vals[2], - } authors, committers = set(), set() - for c in (c['commit'] for c in commits): - authors.add(to_tuple(c['author'])) - committers.add(to_tuple(c['committer'])) - fp_authorship = (project_id.fp_github_name, '', '') - author = fp_authorship if len(authors) != 1\ + for commit in (c['commit'] for c in commits): + authors.add(to_tuple(commit['author'])) + committers.add(to_tuple(commit['committer'])) + fp_authorship = (self.repository.project_id.fp_github_name, '', '') + author = fp_authorship if len(authors) != 1 \ else authors.pop() + (head_commit['author']['date'],) committer = fp_authorship if len(committers) != 1 \ else committers.pop() + (head_commit['committer']['date'],) - conf = working_copy.with_config(env={ - **to_dict('AUTHOR', author), - **to_dict('COMMITTER', committer), - 'GIT_COMMITTER_DATE': '', - }) - # squash to a single commit - conf.reset('--soft', commits[0]['parents'][0]['sha']) - conf.commit(a=True, message="temp") - squashed = conf.stdout().rev_parse('HEAD').stdout.strip().decode() + conf = source.with_params( + 'merge.renamelimit=0', + 'merge.renames=copies', + 'merge.conflictstyle=zdiff3' + ).with_config(stdout=subprocess.PIPE, stderr=subprocess.PIPE) - # switch back to the PR branch - conf.checkout(fp_branch_name) - # cherry-pick the squashed commit to generate the conflict - conf.with_params('merge.renamelimit=0', 'merge.conflictstyle=diff3')\ - .with_config(check=False)\ - .cherry_pick(squashed, no_commit=True) - status = conf.stdout().status(short=True, untracked_files='no').stdout.decode() - if err.strip(): - err = err.rstrip() + '\n----------\nstatus:\n' + status - else: - err = 'status:\n' + status + tree = conf.with_config(check=False).merge_tree( + '--merge-base', commits[0]['parents'][0]['sha'], + target_branch.name, + root.head, + ) # if there was a single commit, reuse its message when committing # the conflict - # TODO: still add conflict information to this? 
            if len(commits) == 1:
                msg = root._make_fp_message(commits[0])
-                conf.with_config(input=str(msg).encode()) \
-                    .commit(all=True, allow_empty=True, file='-')
            else:
-                conf.commit(
-                    all=True, allow_empty=True,
-                    message="""Cherry pick of %s failed
+                out = utils.shorten(out, 8*1024, '[...]')
+                err = utils.shorten(err, 8*1024, '[...]')
+                msg = f"""Cherry pick of {h} failed

 stdout:
-%s
+{out}

 stderr:
-%s
+{err}
-""" % (h, out, err))
-            return (h, out, err, [c['sha'] for c in commits]), working_copy
+"""

-    def _cherry_pick(self, working_copy):
-        """ Cherrypicks ``self`` into the working copy
+            target_head = source.stdout().rev_parse(target_branch.name).stdout.decode().strip()
+            commit = conf.commit_tree(
+                tree=tree.stdout.decode().splitlines(keepends=False)[0],
+                parents=[target_head],
+                message=str(msg),
+                author=author,
+                committer=committer[:2],
+            )
+            assert commit.returncode == 0,\
+                f"commit failed\n\n{commit.stdout.decode()}\n\n{commit.stderr.decode()}"
+            hh = commit.stdout.strip()

-        :return: ``True`` if the cherrypick was successful, ``False`` otherwise
+            return (h, out, err, [c['sha'] for c in commits]), hh
+
+    def _cherry_pick(self, repo: git.Repo, branch: Branch) -> str:
+        """ Cherrypicks ``self`` into ``branch``
+
+        :return: the HEAD of the forward-port if successful
+        :raises CherrypickError: in case of conflict
         """
         # <xxx>.cherrypick.<number>
         logger = _logger.getChild(str(self.id)).getChild('cherrypick')

-        # original head so we can reset
-        prev = original_head = working_copy.stdout().rev_parse('HEAD').stdout.decode().strip()
+        # target's head
+        head = repo.stdout().rev_parse(branch).stdout.decode().strip()

         commits = self.commits()
-        logger.info("%s: copy %s commits to %s\n%s", self, len(commits), original_head, '\n'.join(
-            '- %s (%s)' % (c['sha'], c['commit']['message'].splitlines()[0])
-            for c in commits
-        ))
+        logger.info(
+            "%s: copy %s commits to %s (%s)%s",
+            self, len(commits), branch, head, ''.join(
+                '\n- %s: %s' % (c['sha'], c['commit']['message'].splitlines()[0])
+                for c in commits
+            )
+        )
+        conf = repo.with_params(
+            'merge.renamelimit=0',
+            'merge.renames=copies',
+        ).with_config(
+            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+            check=False,
+        )

         for commit in commits:
             commit_sha = commit['sha']
-            # config (global -c) or commit options don't really give access to
-            # setting dates
-            cm = commit['commit'] # get the "git" commit object rather than the "github" commit resource
-            env = {
-                'GIT_AUTHOR_NAME': cm['author']['name'],
-                'GIT_AUTHOR_EMAIL': cm['author']['email'],
-                'GIT_AUTHOR_DATE': cm['author']['date'],
-                'GIT_COMMITTER_NAME': cm['committer']['name'],
-                'GIT_COMMITTER_EMAIL': cm['committer']['email'],
-            }
-            configured = working_copy.with_config(env=env)
-
-            conf = working_copy.with_config(
-                env={**env, 'GIT_TRACE': 'true'},
-                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                check=False
-            )
-            # first try with default / low renamelimit
-            r = conf.cherry_pick(commit_sha)
-            logger.debug("Cherry-picked %s: %s\n%s\n%s", commit_sha, r.returncode, r.stdout.decode(), _clean_rename(r.stderr.decode()))
+            # merge-tree is a bit stupid and gets confused when the options
+            # follow the parameters
+            r = conf.merge_tree('--merge-base', commit['parents'][0]['sha'], head, commit_sha)
+            new_tree = r.stdout.decode().splitlines(keepends=False)[0]
             if r.returncode:
-                # if it failed, retry with high renamelimit
-                configured.reset('--hard', prev)
-                r = conf.with_params('merge.renamelimit=0').cherry_pick(commit_sha)
-                logger.debug("Cherry-picked %s (renamelimit=0): %s\n%s\n%s",
commit_sha, r.returncode, r.stdout.decode(), _clean_rename(r.stderr.decode())) + # For merge-tree the stdout on conflict is of the form + # + # oid of toplevel tree + # conflicted file info+ + # + # informational messages+ + # + # to match cherrypick we only want the informational messages, + # so strip everything else + r.stdout = r.stdout.split(b'\n\n')[-1] + else: + # By default cherry-pick fails if a non-empty commit becomes + # empty (--empty=stop), also it fails when cherrypicking already + # empty commits which I didn't think we prevented but clearly we + # do...? + parent_tree = conf.rev_parse(f'{head}^{{tree}}').stdout.decode().strip() + if parent_tree == new_tree: + r.returncode = 1 + r.stdout = f"You are currently cherry-picking commit {commit_sha}.".encode() + r.stderr = b"The previous cherry-pick is now empty, possibly due to conflict resolution." - if r.returncode: # pick failed, reset and bail + logger.debug("Cherry-picked %s: %s\n%s\n%s", commit_sha, r.returncode, r.stdout.decode(), _clean_rename(r.stderr.decode())) + if r.returncode: # pick failed, bail # try to log inflateInit: out of memory errors as warning, they - # seem to return the status code 128 + # seem to return the status code 128 (nb: may not work anymore + # with merge-tree, idk) logger.log( logging.WARNING if r.returncode == 128 else logging.INFO, "forward-port of %s (%s) failed at %s", self, self.display_name, commit_sha) - configured.reset('--hard', original_head) + raise CherrypickError( commit_sha, r.stdout.decode(), _clean_rename(r.stderr.decode()), commits ) + # get the "git" commit object rather than the "github" commit resource + cc = conf.commit_tree( + tree=new_tree, + parents=[head], + message=str(self._make_fp_message(commit)), + author=map_author(commit['commit']['author']), + committer=map_committer(commit['commit']['committer']), + ) + if cc.returncode: + raise CherrypickError(commit_sha, cc.stdout.decode(), cc.stderr.decode(), commits) - msg = self._make_fp_message(commit) + head = cc.stdout.strip() + logger.info('%s -> %s', commit_sha, head) - # replace existing commit message with massaged one - configured \ - .with_config(input=str(msg).encode())\ - .commit(amend=True, file='-') - prev = configured.stdout().rev_parse('HEAD').stdout.decode() - logger.info('%s: success -> %s', commit_sha, prev) - - def _build_merge_message(self, message, related_prs=()): - msg = super()._build_merge_message(message, related_prs=related_prs) - - # ensures all reviewers in the review path are on the PR in order: - # original reviewer, then last conflict reviewer, then current PR - reviewers = (self | self._get_root() | self.source_id)\ - .mapped('reviewed_by.formatted_email') - - sobs = msg.headers.getlist('signed-off-by') - msg.headers.remove('signed-off-by') - msg.headers.extend( - ('signed-off-by', signer) - for signer in sobs - if signer not in reviewers - ) - msg.headers.extend( - ('signed-off-by', reviewer) - for reviewer in reversed(reviewers) - ) - - return msg + return head def _make_fp_message(self, commit): cmap = json.loads(self.commits_map) - msg = self._parse_commit_message(commit['commit']['message']) + msg = Message.from_message(commit['commit']['message']) # write the *merged* commit as "original", not the PR's msg.headers['x-original-commit'] = cmap.get(commit['sha'], commit['sha']) # don't stringify so caller can still perform alterations return msg - def _get_local_directory(self): - repos_dir = pathlib.Path(user_cache_dir('forwardport')) - repos_dir.mkdir(parents=True, exist_ok=True) - 
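The new `_cherry_pick` above replaces working-copy `git cherry-pick` calls with pure plumbing: `merge-tree` computes the merged tree without any checkout, and `commit-tree` wraps it in a commit. A self-contained sketch of that flow under stated assumptions (a bare local repo, git recent enough for `merge-tree --merge-base`; function and parameter names are illustrative, not this module's API):

```python
import os
import subprocess

def plumbing_cherry_pick(repo_dir, commit, onto, message, author, committer):
    """Replay `commit` on top of `onto` without a working copy.
    Sketch only: assumes git >= 2.40 for `merge-tree --merge-base`."""
    def git(*args, extra_env=None, check=True):
        return subprocess.run(
            ['git', '-C', repo_dir, *args],
            capture_output=True, text=True, check=check,
            env={**os.environ, **(extra_env or {})},
        )

    base = git('rev-parse', f'{commit}^').stdout.strip()
    # three-way merge of the commit onto its new parent; the first stdout
    # line is the resulting tree's oid, returncode 1 signals a conflict
    r = git('merge-tree', '--merge-base', base, onto, commit, check=False)
    tree = r.stdout.splitlines()[0]
    if r.returncode:
        raise RuntimeError(f'conflict while picking {commit}:\n{r.stdout}')

    # authorship is passed through the environment, as for porcelain commits
    env = {
        'GIT_AUTHOR_NAME': author[0], 'GIT_AUTHOR_EMAIL': author[1],
        'GIT_COMMITTER_NAME': committer[0], 'GIT_COMMITTER_EMAIL': committer[1],
    }
    c = git('commit-tree', tree, '-p', onto, '-m', message, extra_env=env)
    return c.stdout.strip()  # oid of the replayed commit
```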
repo_dir = repos_dir / self.repository.name - - if repo_dir.is_dir(): - return git(repo_dir) - else: - _logger.info("Cloning out %s to %s", self.repository.name, repo_dir) - subprocess.run(['git', 'clone', '--bare', self._source_url, str(repo_dir)], check=True) - # bare repos don't have fetch specs by default, and fetching *into* - # them is a pain in the ass, configure fetch specs so `git fetch` - # works properly - repo = git(repo_dir) - repo.config('--add', 'remote.origin.fetch', '+refs/heads/*:refs/heads/*') - # negative refspecs require git 2.29 - repo.config('--add', 'remote.origin.fetch', '^refs/heads/tmp.*') - repo.config('--add', 'remote.origin.fetch', '^refs/heads/staging.*') - return repo - - def _outstanding(self, cutoff): + def _outstanding(self, cutoff: str) -> typing.ItemsView[PullRequests, list[PullRequests]]: """ Returns "outstanding" (unmerged and unclosed) forward-ports whose source was merged before ``cutoff`` (all of them if not provided). - :param str cutoff: a datetime (ISO-8601 formatted) + :param cutoff: a datetime (ISO-8601 formatted) :returns: an iterator of (source, forward_ports) """ return groupby(self.env['runbot_merge.pull_requests'].search([ @@ -1123,67 +497,33 @@ stderr: ('source_id.merge_date', '<', cutoff), ], order='source_id, id'), lambda p: p.source_id) - def _hall_of_shame(self): - """Provides data for the HOS view - - * outstanding forward ports per reviewer - * pull requests with outstanding forward ports, oldest-merged first - """ - cutoff_dt = datetime.datetime.now() - DEFAULT_DELTA - outstanding = self.env['runbot_merge.pull_requests'].search([ - ('source_id', '!=', False), - ('state', 'not in', ['merged', 'closed']), - ('source_id.merge_date', '<', cutoff_dt), - ], order=None) - # only keep merged because apparently some PRs are in a weird spot - # where they're sources but closed? 
-        sources = outstanding.mapped('source_id').filtered('merge_date').sorted('merge_date')
-        outstandings = []
-        reviewers = collections.Counter()
-        for source in sources:
-            outstandings.append(Outstanding(source=source, prs=source.forwardport_ids & outstanding))
-            reviewers[source.reviewed_by] += 1
-        return HallOfShame(
-            reviewers=reviewers.most_common(),
-            outstanding=outstandings,
-        )
-
     def _reminder(self):
-        cutoff = self.env.context.get('forwardport_updated_before') \
+        cutoff = getattr(builtins, 'forwardport_updated_before', None) \
             or fields.Datetime.to_string(datetime.datetime.now() - DEFAULT_DELTA)
         cutoff_dt = fields.Datetime.from_string(cutoff)

         for source, prs in self._outstanding(cutoff):
             backoff = dateutil.relativedelta.relativedelta(days=2**source.reminder_backoff_factor)
-            prs = list(prs)
             if source.merge_date > (cutoff_dt - backoff):
                 continue
             source.reminder_backoff_factor += 1
-            self.env['runbot_merge.pull_requests.feedback'].create({
-                'repository': source.repository.id,
-                'pull_request': source.number,
-                'message': "%sthis pull request has forward-port PRs awaiting action (not merged or closed):\n%s" % (
-                    source.ping(),
-                    '\n- '.join(pr.display_name for pr in sorted(prs, key=lambda p: p.number))
-                ),
-                'token_field': 'fp_github_token',
-            })

-    def ping(self, author=True, reviewer=True):
-        source = self.source_id
-        if not source:
-            return super().ping(author=author, reviewer=reviewer)
+            # only keep the PRs which don't have an attached descendant
+            pr_ids = {p.id for p in prs}
+            for pr in prs:
+                pr_ids.discard(pr.parent_id.id)
+            for pr in (p for p in prs if p.id in pr_ids):
+                self.env.ref('runbot_merge.forwardport.reminder')._send(
+                    repository=pr.repository,
+                    pull_request=pr.number,
+                    token_field='fp_github_token',
+                    format_args={'pr': pr, 'source': source},
+                )
+
+
+map_author = operator.itemgetter('name', 'email', 'date')
+map_committer = operator.itemgetter('name', 'email')

-        # use a dict literal to maintain ordering (3.6+)
-        pingline = ' '.join(
-            f'@{p.github_login}'
-            for p in filter(None, {
-                author and source.author: None,
-                reviewer and source.reviewed_by: None,
-                reviewer and self.reviewed_by: None,
-            })
-        )
-        return pingline and (pingline + ' ')

 class Stagings(models.Model):
     _inherit = 'runbot_merge.stagings'
@@ -1192,8 +532,11 @@ class Stagings(models.Model):
         r = super().write(vals)
         # we've just deactivated a successful staging (so it got ~merged)
         if vals.get('active') is False and self.state == 'success':
-            # check al batches to see if they should be forward ported
+            # check all batches to see if they should be forward ported
             for b in self.with_context(active_test=False).batch_ids:
+                if b.fw_policy == 'no':
+                    continue
+
                 # if all PRs of a batch have parents they're part of an FP
                 # sequence and thus handled separately, otherwise they're
                 # considered regular merges
@@ -1209,89 +552,6 @@ class Feedback(models.Model):

     token_field = fields.Selection(selection_add=[('fp_github_token', 'Forwardport Bot')])

-ALWAYS = ('gc.auto=0', 'maintenance.auto=0')
-
-def _bypass_limits():
-    resource.setrlimit(resource.RLIMIT_AS, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
-
-def git(directory): return Repo(directory, check=True)
-class Repo:
-    def __init__(self, directory, **config):
-        self._directory = str(directory)
-        config.setdefault('stderr', subprocess.PIPE)
-        self._config = config
-        self._params = ()
-        self._opener = subprocess.run
-
-    def __getattr__(self, name):
-        return GitCommand(self, name.replace('_', '-'))
-
-    def _run(self, *args, **kwargs):
-        opts = 
{**self._config, **kwargs} - args = ('git', '-C', self._directory)\ - + tuple(itertools.chain.from_iterable(('-c', p) for p in self._params + ALWAYS))\ - + args - try: - return self._opener(args, preexec_fn=_bypass_limits, **opts) - except subprocess.CalledProcessError as e: - stream = e.stderr if e.stderr else e.stdout - if stream: - _logger.error("git call error: %s", stream) - raise - - def stdout(self, flag=True): - if flag is True: - return self.with_config(stdout=subprocess.PIPE) - elif flag is False: - return self.with_config(stdout=None) - return self.with_config(stdout=flag) - - def lazy(self): - r = self.with_config() - r._config.pop('check', None) - r._opener = subprocess.Popen - return r - - def check(self, flag): - return self.with_config(check=flag) - - def with_config(self, **kw): - opts = {**self._config, **kw} - r = Repo(self._directory, **opts) - r._opener = self._opener - r._params = self._params - return r - - def with_params(self, *args): - r = self.with_config() - r._params = args - return r - - def clone(self, to, branch=None): - self._run( - 'clone', - *([] if branch is None else ['-b', branch]), - self._directory, to, - ) - return Repo(to) - -class GitCommand: - def __init__(self, repo, name): - self._name = name - self._repo = repo - - def __call__(self, *args, **kwargs): - return self._repo._run(self._name, *args, *self._to_options(kwargs)) - - def _to_options(self, d): - for k, v in d.items(): - if len(k) == 1: - yield '-' + k - else: - yield '--' + k.replace('_', '-') - if v not in (None, True): - assert v is not False - yield str(v) class CherrypickError(Exception): ... diff --git a/forwardport/models/project_freeze.py b/forwardport/models/project_freeze.py index 635912c0..0796fbaf 100644 --- a/forwardport/models/project_freeze.py +++ b/forwardport/models/project_freeze.py @@ -22,5 +22,7 @@ class FreezeWizard(models.Model): def unlink(self): r = super().unlink() if not (self.env.context.get('forwardport_keep_disabled') or self.search_count([])): - self.env.ref('forwardport.port_forward').active = True + cron = self.env.ref('forwardport.port_forward') + cron.active = True + cron._trigger() # process forward ports enqueued during the freeze period return r diff --git a/forwardport/tests/conftest.py b/forwardport/tests/conftest.py index 408345af..8764943d 100644 --- a/forwardport/tests/conftest.py +++ b/forwardport/tests/conftest.py @@ -4,18 +4,6 @@ import re import pytest import requests -@pytest.fixture -def default_crons(): - return [ - 'runbot_merge.process_updated_commits', - 'runbot_merge.merge_cron', - 'runbot_merge.staging_cron', - 'forwardport.port_forward', - 'forwardport.updates', - 'runbot_merge.check_linked_prs_status', - 'runbot_merge.feedback_cron', - ] - # public_repo — necessary to leave comments # admin:repo_hook — to set up hooks (duh) # delete_repo — to cleanup repos created under a user diff --git a/forwardport/tests/test_batches.py b/forwardport/tests/test_batches.py index a637e7a0..2ecab8b6 100644 --- a/forwardport/tests/test_batches.py +++ b/forwardport/tests/test_batches.py @@ -1,4 +1,6 @@ -from utils import Commit, make_basic +import re + +from utils import Commit, make_basic, to_pr, seen, matches def test_single_updated(env, config, make_repo): @@ -87,3 +89,313 @@ def test_single_updated(env, config, make_repo): assert pr22_id.source_id == pr2_id assert pr22_id.parent_id == pr21_id + +def test_closing_during_fp(env, config, make_repo, users): + """ Closing a PR after it's been ported once should not port it further, but + the rest of 
the batch should carry on + """ + r1, _ = make_basic(env, config, make_repo) + r2, _ = make_basic(env, config, make_repo) + env['runbot_merge.repository'].search([]).required_statuses = 'default' + + with r1, r2: + r1.make_commits('a', Commit('1', tree={'1': '0'}), ref='heads/aref') + pr1 = r1.make_pr(target='a', head='aref') + r1.post_status('aref', 'success') + pr1.post_comment('hansen r+', config['role_reviewer']['token']) + + r2.make_commits('a', Commit('2', tree={'2': '0'}), ref='heads/aref') + pr2 = r2.make_pr(target='a', head='aref') + r2.post_status('aref', 'success') + pr2.post_comment('hansen r+', config['role_reviewer']['token']) + env.run_crons() + + with r1, r2: + r1.post_status('staging.a', 'success') + r2.post_status('staging.a', 'success') + env.run_crons() + + pr1_id = to_pr(env, pr1) + [pr1_1_id] = pr1_id.forwardport_ids + pr2_id = to_pr(env, pr2) + [pr2_1_id] = pr2_id.forwardport_ids + + with r1: + r1.get_pr(pr1_1_id.number).close(config['role_user']['token']) + + with r2: + r2.post_status(pr2_1_id.head, 'success') + env.run_crons() + + assert env['runbot_merge.pull_requests'].search_count([]) == 5,\ + "only one of the forward ports should be ported" + assert not env['runbot_merge.pull_requests'].search([('parent_id', '=', pr1_1_id.id)]),\ + "the closed PR should not be ported" + assert env['runbot_merge.pull_requests'].search([('source_id', '=', pr1_id.id)]) == pr1_1_id,\ + "the closed PR should not be ported" + + r1_b_head = r1.commit("b") + with r2: + r2.get_pr(pr2_1_id.number).post_comment('hansen r+', config['role_reviewer']['token']) + env.run_crons() + assert not pr2_1_id.blocked + assert not pr2_1_id.batch_id.blocked + st = pr2_1_id.staging_id + assert st + with r1, r2: + r1.post_status('staging.b', 'success') + r2.post_status('staging.b', 'success') + env.run_crons() + assert st.state == 'success' + + assert r1_b_head.id == r1.commit("b").id, \ + "r1:b's head should not have been touched" + +def test_add_pr_during_fp(env, config, make_repo, users): + """ It should be possible to add new PRs to an FP batch + """ + r1, _ = make_basic(env, config, make_repo, statuses="default") + r2, fork2 = make_basic(env, config, make_repo, statuses="default") + # needs a "d" branch + env['runbot_merge.project'].search([]).write({ + 'branch_ids': [(0, 0, {'name': 'd', 'sequence': 40})], + }) + with r1, r2: + r1.make_ref("heads/d", r1.commit("c").id) + r2.make_ref("heads/d", r2.commit("c").id) + + with r1: + r1.make_commits('a', Commit('1', tree={'1': '0'}), ref='heads/aref') + pr1_a = r1.make_pr(target='a', head='aref') + r1.post_status('aref', 'success') + pr1_a.post_comment('hansen r+', config['role_reviewer']['token']) + env.run_crons() + + with r1, r2: + r1.post_status('staging.a', 'success') + r2.post_status('staging.a', 'success') + env.run_crons() + + pr1_a_id = to_pr(env, pr1_a) + [pr1_b_id] = pr1_a_id.forwardport_ids + + with r2, fork2: + fork2.make_commits('b', Commit('2', tree={'2': '0'}), ref=f'heads/{pr1_b_id.refname}') + pr2_b = r2.make_pr(title="B", target='b', head=f'{fork2.owner}:{pr1_b_id.refname}') + env.run_crons() + + pr2_b_id = to_pr(env, pr2_b) + + assert not pr1_b_id.staging_id + assert not pr2_b_id.staging_id + assert pr1_b_id.batch_id == pr2_b_id.batch_id + assert pr1_b_id.state == "opened",\ + "implicit approval from forward port should have been canceled" + batch = pr2_b_id.batch_id + + with r1: + r1.post_status(pr1_b_id.head, 'success') + r1.get_pr(pr1_b_id.number).post_comment('hansen r+', config['role_reviewer']['token']) + env.run_crons() + + 
assert batch.blocked + assert pr1_b_id.blocked + + with r2: + r2.post_status(pr2_b.head, "success") + pr2_b.post_comment("hansen r+", config['role_reviewer']['token']) + env.run_crons() + + assert not batch.blocked + assert pr1_b_id.staging_id and pr1_b_id.staging_id == pr2_b_id.staging_id + + with r1, r2: + r1.post_status('staging.b', 'success') + r2.post_status('staging.b', 'success') + env.run_crons() + + def find_child(pr): + return env['runbot_merge.pull_requests'].search([ + ('parent_id', '=', pr.id), + ]) + pr1_c_id = find_child(pr1_b_id) + assert pr1_c_id + pr2_c_id = find_child(pr2_b_id) + assert pr2_c_id + + with r1, r2: + r1.post_status(pr1_c_id.head, 'success') + r2.post_status(pr2_c_id.head, 'success') + env.run_crons() + + assert find_child(pr1_c_id) + assert find_child(pr2_c_id) + +def test_add_to_forward_ported(env, config, make_repo, users): + """Add a new branch to an intermediate step of a fw *sequence*, either + because skipci or because all the intermediate CI succeeded + """ + # region setup + r1, _ = make_basic(env, config, make_repo, statuses="default") + r2, fork2 = make_basic(env, config, make_repo, statuses="default") + + with r1: + r1.make_commits('a', Commit('a', tree={'a': 'a'}), ref="heads/pr1") + pr1_a = r1.make_pr(target="a", head="pr1") + r1.post_status(pr1_a.head, 'success') + pr1_a.post_comment('hansen r+', config['role_reviewer']['token']) + env.run_crons() + with r1, r2: + r1.post_status('staging.a', 'success') + r2.post_status('staging.a', 'success') + env.run_crons() + + # region port forward + pr1_a_id = to_pr(env, pr1_a) + pr1_b_id = pr1_a_id.forwardport_ids + assert pr1_b_id + with r1: + r1.post_status(pr1_b_id.head, 'success') + env.run_crons() + pr1_c_id = pr1_a_id.forwardport_ids - pr1_b_id + assert pr1_c_id + # endregion + # endregion + + # new PR must be in fork for labels to actually match + with r2, fork2: + # branch in fork has no owner prefix, but HEAD for cross-repo PR does + fork2.make_commits("b", Commit('b', tree={'b': 'b'}), ref=f'heads/{pr1_b_id.refname}') + pr2_b = r2.make_pr(title="b", target="b", head=pr1_b_id.label) + r2.post_status(pr2_b.head, 'success') + env.run_crons() + + pr2_b_id = to_pr(env, pr2_b) + assert pr2_b_id.batch_id == pr1_b_id.batch_id + assert len(pr2_b_id.forwardport_ids) == 1, \ + "since the batch is already forward ported, the new PR should" \ + " immediately be forward ported to match" + assert pr2_b_id.forwardport_ids.label == pr1_c_id.label + + pr2_a = r1.get_pr(pr1_b_id.number) + with r1, r2: + pr2_a.post_comment('hansen r+', config['role_reviewer']['token']) + pr2_b.post_comment("hansen r+", config['role_reviewer']['token']) + env.run_crons() + + with r1, r2: + r1.post_status('staging.b', 'success') + r2.post_status('staging.b', 'success') + env.run_crons() + + assert pr1_b_id.state == 'merged' + assert pr2_b_id.state == 'merged' + + assert len(pr2_b_id.forwardport_ids) == 1,\ + "verify that pr2_b did not get forward ported again on merge" + pr2_c = r2.get_pr(pr2_b_id.forwardport_ids.number) + assert pr2_c.comments == [ + seen(env, pr2_c, users), + (users['user'], '''\ +@{user} this PR targets c and is the last of the forward-port chain. 
+ +To merge the full chain, use +> @hansen r+ + +More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port +'''.format_map(users)), + ] + +def test_add_to_forward_port_conflict(env, config, make_repo, users): + """If a PR is added to an existing forward port sequence, and it causes + conflicts when forward ported, it should be treated similarly to an *update* + causing a conflict: the PR is still created, but it's set in conflict. + """ + # region setup + r1, _ = make_basic(env, config, make_repo, statuses="default") + r2, fork2 = make_basic(env, config, make_repo, statuses="default") + project = env['runbot_merge.project'].search([]) + with r2: + r2.make_commits( + "c", + Commit("C-onflict", tree={"b": "X"}), + ref="heads/c" + ) + + with r1: + r1.make_commits('a', Commit('a', tree={'a': 'a'}), ref="heads/pr1") + pr1_a = r1.make_pr(target="a", head="pr1") + r1.post_status(pr1_a.head, 'success') + pr1_a.post_comment('hansen r+', config['role_reviewer']['token']) + env.run_crons() + with r1, r2: + r1.post_status('staging.a', 'success') + r2.post_status('staging.a', 'success') + env.run_crons() + + # region port forward + pr1_a_id = to_pr(env, pr1_a) + pr1_b_id = pr1_a_id.forwardport_ids + assert pr1_b_id + with r1: + r1.post_status(pr1_b_id.head, 'success') + env.run_crons() + pr1_c_id = pr1_a_id.forwardport_ids - pr1_b_id + assert pr1_c_id + # endregion + # endregion + + # new PR must be in fork for labels to actually match + with r2, fork2: + # branch in fork has no owner prefix, but HEAD for cross-repo PR does + fork2.make_commits("b", Commit('b', tree={'b': 'b'}), ref=f'heads/{pr1_b_id.refname}') + pr2_b = r2.make_pr(title="b", target="b", head=pr1_b_id.label) + r2.post_status(pr2_b.head, 'success') + env.run_crons() + + pr2_b_id = to_pr(env, pr2_b) + assert pr2_b_id.batch_id == pr1_b_id.batch_id + pr2_c_id = pr2_b_id.forwardport_ids + assert len(pr2_c_id) == 1, \ + "since the batch is already forward ported, the new PR should" \ + " immediately be forward ported to match" + assert pr2_c_id.label == pr1_c_id.label + assert not pr2_c_id.parent_id, "conflict -> should be detached" + assert pr2_c_id.detach_reason + + pr2_a = r1.get_pr(pr1_b_id.number) + with r1, r2: + pr2_a.post_comment('hansen r+', config['role_reviewer']['token']) + pr2_b.post_comment("hansen r+", config['role_reviewer']['token']) + env.run_crons() + + with r1, r2: + r1.post_status('staging.b', 'success') + r2.post_status('staging.b', 'success') + env.run_crons() + + assert pr1_b_id.state == 'merged' + assert pr2_b_id.state == 'merged' + + pr2_c = r2.get_pr(pr2_c_id.number) + assert pr2_c.comments == [ + seen(env, pr2_c, users), + # should have conflicts + (users['user'], """@{user} cherrypicking of pull request {previous.display_name} failed. + +stdout: +``` +Auto-merging b +CONFLICT (add/add): Merge conflict in b + +``` + +Either perform the forward-port manually (and push to this branch, proceeding as usual) or close this PR (maybe?). + +In the former case, you may want to edit this PR message as well. + +:warning: after resolving this conflict, you will need to merge it via @{project.github_prefix}. 
+ +More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port +""".format(project=project, previous=pr2_b_id, **users)) + ] diff --git a/forwardport/tests/test_conflicts.py b/forwardport/tests/test_conflicts.py index 4e6dd084..3c0bbda6 100644 --- a/forwardport/tests/test_conflicts.py +++ b/forwardport/tests/test_conflicts.py @@ -1,8 +1,11 @@ +import random import re import time from operator import itemgetter -from utils import make_basic, Commit, validate_all, re_matches, seen, REF_PATTERN, to_pr +import pytest + +from utils import make_basic, Commit, validate_all, matches, seen, REF_PATTERN, to_pr def test_conflict(env, config, make_repo, users): @@ -16,7 +19,7 @@ def test_conflict(env, config, make_repo, users): project = env['runbot_merge.project'].search([]) project.write({ 'branch_ids': [ - (0, 0, {'name': 'd', 'sequence': 40, 'fp_target': True}) + (0, 0, {'name': 'd', 'sequence': 40}) ] }) @@ -50,6 +53,7 @@ def test_conflict(env, config, make_repo, users): assert prc_id.state == 'opened' p = prod.commit(p_0) + prc = prod.get_pr(prc_id.number) c = prod.commit(prc_id.head) assert c.author == p.author # ignore date as we're specifically not keeping the original's @@ -58,14 +62,36 @@ def test_conflict(env, config, make_repo, users): assert prod.read_tree(c) == { 'f': 'c', 'g': 'a', - 'h': re_matches(r'''<<<\x3c<<< HEAD + 'h': matches('''<<<\x3c<<< $$ a -|||||||| parent of [\da-f]{7,}.* +||||||| $$ ======= xxx ->>>\x3e>>> [\da-f]{7,}.* +>>>\x3e>>> $$ '''), } + assert prc.comments == [ + seen(env, prc, users), + (users['user'], +f'''@{users['user']} @{users['reviewer']} cherrypicking of pull request {pra_id.display_name} failed. + +stdout: +``` +Auto-merging h +CONFLICT (add/add): Merge conflict in h + +``` + +Either perform the forward-port manually (and push to this branch, proceeding as usual) or close this PR (maybe?). + +In the former case, you may want to edit this PR message as well. + +:warning: after resolving this conflict, you will need to merge it via @{project.github_prefix}. + +More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port +''') + ] + prb = prod.get_pr(prb_id.number) assert prb.comments == [ seen(env, prb, users), @@ -76,13 +102,12 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port '''), (users['user'], """@%s @%s the next pull request (%s) is in conflict. \ You can merge the chain up to here by saying -> @%s r+ +> @hansen r+ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port """ % ( users['user'], users['reviewer'], prc_id.display_name, - project.fp_github_name )) ] @@ -148,6 +173,94 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port 'i': 'a', } +def test_massive_conflict(env, config, make_repo): + """If the conflict is large enough, the commit message may exceed ARG_MAX + and trigger E2BIG. + """ + # CONFLICT (modify/delete): <file> deleted in <commit> (<title>) and modified in HEAD. Version HEAD of <file> left in tree. + # + # 107 + 2 * len(filename) + len(title) per conflicting file. + # - filename: random.randbytes(10).hex() -> 20 + # - title: random.randbytes(20).hex() -> 40 + # -> 701 (!) 
files + + files = [] + while len(files) < 1500: + files.append(random.randbytes(10).hex()) + + # region setup + project = env['runbot_merge.project'].create({ + 'name': "thing", + 'github_token': config['github']['token'], + 'github_prefix': 'hansen', + 'fp_github_token': config['github']['token'], + 'fp_github_name': 'herbert', + 'branch_ids': [ + (0, 0, {'name': 'a', 'sequence': 100}), + (0, 0, {'name': 'b', 'sequence': 80}), + ], + }) + + repo = make_repo("repo") + env['runbot_merge.events_sources'].create({'repository': repo.name}) + + repo_id = env['runbot_merge.repository'].create({ + 'project_id': project.id, + 'name': repo.name, + 'required_statuses': "default", + 'fp_remote_target': repo.name, + 'group_id': False, + }) + env['res.partner'].search([ + ('github_login', '=', config['role_reviewer']['user']) + ]).write({ + 'review_rights': [(0, 0, {'repository_id': repo_id.id, 'review': True})] + }) + + with repo: + # create branch with a ton of empty files + repo.make_commits( + None, + Commit( + random.randbytes(20).hex(), + tree=dict.fromkeys(files, "xoxo"), + ), + ref='heads/a', + ) + + # removes all those files in the next branch + repo.make_commits( + 'a', + Commit( + random.randbytes(20).hex(), + tree=dict.fromkeys(files, "content!"), + ), + ref='heads/b', + ) + # endregion setup + + with repo: + # update all the files + repo.make_commits( + 'a', + Commit(random.randbytes(20).hex(), tree={'a': '1'}), + Commit(random.randbytes(20).hex(), tree={'x': '1'}, reset=True), + ref='heads/change', + ) + pr = repo.make_pr(target='a', head='change') + repo.post_status('refs/heads/change', 'success') + pr.post_comment('hansen rebase-ff r+', config['role_reviewer']['token']) + env.run_crons() + + with repo: + repo.post_status('staging.a', 'success') + env.run_crons() + + # we don't actually need more, the bug crashes the forward port entirely so + # the PR is never even created + _pra_id, _prb_id = env['runbot_merge.pull_requests'].search([], order='number') + + def test_conflict_deleted(env, config, make_repo): prod, other = make_basic(env, config, make_repo) # remove f from b @@ -269,6 +382,7 @@ def test_multiple_commits_same_authorship(env, config, make_repo): assert get(c.author) == get(author) assert get(c.committer) == get(committer) + def test_multiple_commits_different_authorship(env, config, make_repo, users, rolemap): """ When a PR has multiple commits by different authors, the resulting (squashed) conflict commit should have an empty email @@ -316,11 +430,11 @@ def test_multiple_commits_different_authorship(env, config, make_repo, users, ro c = prod.commit(pr2_id.head) assert len(c.parents) == 1 get = itemgetter('name', 'email') - rm = rolemap['user'] - assert get(c.author) == (rm['login'], ''), \ + bot = pr_id.repository.project_id.fp_github_name + assert get(c.author) == (bot, ''), \ "In a multi-author PR, the squashed conflict commit should have the " \ "author set to the bot but an empty email" - assert get(c.committer) == (rm['login'], '') + assert get(c.committer) == (bot, '') assert re.match(r'''<<<\x3c<<< HEAD b @@ -345,7 +459,7 @@ b assert pr2.comments == [ seen(env, pr2, users), - (users['user'], re_matches(r'@%s @%s .*CONFLICT' % (users['user'], users['reviewer']), re.DOTALL)), + (users['user'], matches('@%s @%s $$CONFLICT' % (users['user'], users['reviewer']))), (users['reviewer'], 'hansen r+'), (users['user'], f"@{users['user']} @{users['reviewer']} unable to stage: " "All commits must have author and committer email, " diff --git a/forwardport/tests/test_limit.py 
b/forwardport/tests/test_limit.py index 8310619d..de563e3d 100644 --- a/forwardport/tests/test_limit.py +++ b/forwardport/tests/test_limit.py @@ -1,126 +1,96 @@ -# -*- coding: utf-8 -*- -import collections -import time - import pytest -from utils import seen, Commit, make_basic +from utils import seen, Commit, make_basic, to_pr -Description = collections.namedtuple('Restriction', 'source limit') -def test_configure(env, config, make_repo): - """ Checks that configuring an FP limit on a PR is respected - * limits to not the latest - * limits to the current target (= no FP) - * limits to an earlier branch (???) - """ - prod, other = make_basic(env, config, make_repo) - bot_name = env['runbot_merge.project'].search([]).fp_github_name - descriptions = [ - Description(source='a', limit='b'), - Description(source='b', limit='b'), - Description(source='b', limit='a'), - ] - originals = [] +@pytest.mark.parametrize('source,limit,count', [ + pytest.param('a', 'b', 1, id='not-last'), + pytest.param('b', 'b', 0, id='current'), + pytest.param('b', 'a', 0, id='earlier'), +]) +def test_configure_fp_limit(env, config, make_repo, source, limit, count, page): + prod, other = make_basic(env, config, make_repo, statuses="default") with prod: - for i, descr in enumerate(descriptions): - [c] = prod.make_commits( - descr.source, Commit('c %d' % i, tree={str(i): str(i)}), - ref='heads/branch%d' % i, - ) - pr = prod.make_pr(target=descr.source, head='branch%d'%i) - prod.post_status(c, 'success', 'legal/cla') - prod.post_status(c, 'success', 'ci/runbot') - pr.post_comment('hansen r+\n%s up to %s' % (bot_name, descr.limit), config['role_reviewer']['token']) - originals.append(pr.number) + [c] = prod.make_commits( + source, Commit('c', tree={'f': 'g'}), + ref='heads/branch', + ) + pr = prod.make_pr(target=source, head='branch') + prod.post_status(c, 'success') + pr.post_comment(f'hansen r+ up to {limit}', config['role_reviewer']['token']) env.run_crons() with prod: - prod.post_status('staging.a', 'success', 'legal/cla') - prod.post_status('staging.a', 'success', 'ci/runbot') - prod.post_status('staging.b', 'success', 'legal/cla') - prod.post_status('staging.b', 'success', 'ci/runbot') + prod.post_status(f'staging.{source}', 'success') env.run_crons() - # should have created a single FP PR for 0, none for 1 and none for 2 - prs = env['runbot_merge.pull_requests'].search([], order='number') - assert len(prs) == 4 - assert prs[-1].parent_id == prs[0] - assert prs[0].number == originals[0] - assert prs[1].number == originals[1] - assert prs[2].number == originals[2] + pr_id = to_pr(env, pr) + descendants = env['runbot_merge.pull_requests'].search([ + ('source_id', '=', pr_id.id) + ]) + assert len(descendants) == count + limit_id = env['runbot_merge.branch'].search([('name', '=', limit)]) + assert pr_id.limit_id == limit_id + assert not descendants.limit_id, "descendant should not inherit the limit explicitly" -def test_self_disabled(env, config, make_repo): - """ Allow setting target as limit even if it's disabled - """ - prod, other = make_basic(env, config, make_repo) - bot_name = env['runbot_merge.project'].search([]).fp_github_name - branch_a = env['runbot_merge.branch'].search([('name', '=', 'a')]) - branch_a.fp_target = False - with prod: - [c] = prod.make_commits('a', Commit('c', tree={'0': '0'}), ref='heads/mybranch') - pr = prod.make_pr(target='a', head='mybranch') - prod.post_status(c, 'success', 'legal/cla') - prod.post_status(c, 'success', 'ci/runbot') - pr.post_comment('hansen r+\n%s up to a' % bot_name, 
config['role_reviewer']['token']) - env.run_crons() - pr_id = env['runbot_merge.pull_requests'].search([('number', '=', pr.number)]) - assert pr_id.limit_id == branch_a + # check that the basic thingie works + page(f'/{prod.name}/pull/{pr.number}.png') - with prod: - prod.post_status('staging.a', 'success', 'legal/cla') - prod.post_status('staging.a', 'success', 'ci/runbot') + if descendants: + c = env['runbot_merge.branch'].search([('name', '=', 'c')]) + descendants.limit_id = c.id - assert env['runbot_merge.pull_requests'].search([]) == pr_id,\ - "should not have created a forward port" + page(f'/{prod.name}/pull/{pr.number}.png') - -def test_ignore(env, config, make_repo): +def test_ignore(env, config, make_repo, users): """ Provide an "ignore" command which is equivalent to setting the limit to target """ - prod, other = make_basic(env, config, make_repo) - bot_name = env['runbot_merge.project'].search([]).fp_github_name + prod, _ = make_basic(env, config, make_repo, statuses="default") branch_a = env['runbot_merge.branch'].search([('name', '=', 'a')]) with prod: [c] = prod.make_commits('a', Commit('c', tree={'0': '0'}), ref='heads/mybranch') pr = prod.make_pr(target='a', head='mybranch') - prod.post_status(c, 'success', 'legal/cla') - prod.post_status(c, 'success', 'ci/runbot') - pr.post_comment('hansen r+\n%s ignore' % bot_name, config['role_reviewer']['token']) + prod.post_status(c, 'success') + env.run_crons() + with prod: + pr.post_comment('hansen ignore', config['role_reviewer']['token']) + pr.post_comment('hansen r+ fw=no', config['role_reviewer']['token']) env.run_crons() pr_id = env['runbot_merge.pull_requests'].search([('number', '=', pr.number)]) assert pr_id.limit_id == branch_a with prod: - prod.post_status('staging.a', 'success', 'legal/cla') - prod.post_status('staging.a', 'success', 'ci/runbot') + prod.post_status('staging.a', 'success') + env.run_crons() assert env['runbot_merge.pull_requests'].search([]) == pr_id,\ "should not have created a forward port" + assert pr.comments == [ + seen(env, pr, users), + (users['reviewer'], "hansen ignore"), + (users['reviewer'], "hansen r+ fw=no"), + (users['user'], "'ignore' is deprecated, use 'fw=no' to disable forward porting."), + (users['user'], "Forward-port disabled (via limit)."), + (users['user'], "Disabled forward-porting."), + ] -@pytest.mark.parametrize('enabled', ['active', 'fp_target']) -def test_disable(env, config, make_repo, users, enabled): +def test_disable(env, config, make_repo, users): """ Checks behaviour if the limit target is disabled: * disable target while FP is ongoing -> skip over (and stop there so no FP) * forward-port over a disabled branch * request a disabled target as limit - - Disabling (with respect to forward ports) can be performed by marking the - branch as !active (which also affects mergebot operations), or as - !fp_target (won't be forward-ported to). 
""" prod, other = make_basic(env, config, make_repo) project = env['runbot_merge.project'].search([]) - bot_name = project.fp_github_name with prod: [c] = prod.make_commits('a', Commit('c 0', tree={'0': '0'}), ref='heads/branch0') pr = prod.make_pr(target='a', head='branch0') prod.post_status(c, 'success', 'legal/cla') prod.post_status(c, 'success', 'ci/runbot') - pr.post_comment('hansen r+\n%s up to b' % bot_name, config['role_reviewer']['token']) + pr.post_comment('hansen r+ up to b', config['role_reviewer']['token']) [c] = prod.make_commits('a', Commit('c 1', tree={'1': '1'}), ref='heads/branch1') pr = prod.make_pr(target='a', head='branch1') @@ -133,7 +103,7 @@ def test_disable(env, config, make_repo, users, enabled): prod.post_status('staging.a', 'success', 'legal/cla') prod.post_status('staging.a', 'success', 'ci/runbot') # disable branch b - env['runbot_merge.branch'].search([('name', '=', 'b')]).write({enabled: False}) + env['runbot_merge.branch'].search([('name', '=', 'b')]).active = False env.run_crons() # should have created a single PR (to branch c, for pr 1) @@ -141,85 +111,66 @@ def test_disable(env, config, make_repo, users, enabled): assert p.parent_id == _1 assert p.target.name == 'c' - project.fp_github_token = config['role_other']['token'] - bot_name = project.fp_github_name with prod: [c] = prod.make_commits('a', Commit('c 2', tree={'2': '2'}), ref='heads/branch2') pr = prod.make_pr(target='a', head='branch2') prod.post_status(c, 'success', 'legal/cla') prod.post_status(c, 'success', 'ci/runbot') - pr.post_comment('hansen r+\n%s up to' % bot_name, config['role_reviewer']['token']) - pr.post_comment('%s up to b' % bot_name, config['role_reviewer']['token']) - pr.post_comment('%s up to foo' % bot_name, config['role_reviewer']['token']) - pr.post_comment('%s up to c' % bot_name, config['role_reviewer']['token']) + pr.post_comment('hansen r+ up to', config['role_reviewer']['token']) + pr.post_comment('hansen up to b', config['role_reviewer']['token']) + pr.post_comment('hansen up to foo', config['role_reviewer']['token']) + pr.post_comment('hansen up to c', config['role_reviewer']['token']) env.run_crons() # use a set because git webhooks delays might lead to mis-ordered # responses and we don't care that much assert set(pr.comments) == { - (users['reviewer'], "hansen r+\n%s up to" % bot_name), - (users['other'], "@%s please provide a branch to forward-port to." % users['reviewer']), - (users['reviewer'], "%s up to b" % bot_name), - (users['other'], "@%s branch 'b' is disabled, it can't be used as a forward port target." % users['reviewer']), - (users['reviewer'], "%s up to foo" % bot_name), - (users['other'], "@%s there is no branch 'foo', it can't be used as a forward port target." % users['reviewer']), - (users['reviewer'], "%s up to c" % bot_name), - (users['other'], "Forward-porting to 'c'."), seen(env, pr, users), + (users['reviewer'], "hansen r+ up to"), + (users['user'], """\ +@{reviewer} please provide a branch to forward-port to. + +For your own safety I've ignored *everything in your entire comment*. 
+ +Currently available commands: + +|command|| +|-|-| +|`help`|displays this help| +|`r(eview)+`|approves the PR, if it's a forwardport also approves all non-detached parents| +|`r(eview)=<number>`|only approves the specified parents| +|`fw=no`|does not forward-port this PR| +|`fw=default`|forward-ports this PR normally| +|`fw=skipci`|does not wait for a forward-port's statuses to succeed before creating the next one| +|`up to <branch>`|only ports this PR forward to the specified branch (included)| +|`merge`|integrate the PR with a simple merge commit, using the PR description as message| +|`rebase-merge`|rebases the PR on top of the target branch the integrates with a merge commit, using the PR description as message| +|`rebase-ff`|rebases the PR on top of the target branch, then fast-forwards| +|`squash`|squashes the PR as a single commit on the target branch, using the PR description as message| +|`delegate+`|grants approval rights to the PR author| +|`delegate=<...>`|grants approval rights on this PR to the specified github users| +|`default`|stages the PR normally| +|`priority`|tries to stage this PR first, then adds `default` PRs if the staging has room| +|`alone`|stages this PR only with other PRs of the same priority| +|`cancel=staging`|automatically cancels the current staging when this PR becomes ready| +|`check`|fetches or refreshes PR metadata, resets mergebot state| + +Note: this help text is dynamic and will change with the state of the PR. +""".format_map(users)), + (users['reviewer'], "hansen up to b"), + (users['user'], "@{reviewer} branch 'b' is disabled, it can't be used as a forward port target.".format_map(users)), + (users['reviewer'], "hansen up to foo"), + (users['user'], "@{reviewer} there is no branch 'foo', it can't be used as a forward port target.".format_map(users)), + (users['reviewer'], "hansen up to c"), + (users['user'], "Forward-porting to 'c'."), } -def test_default_disabled(env, config, make_repo, users): - """ If the default limit is disabled, it should still be the default - limit but the ping message should be set on the actual last FP (to the - last non-deactivated target) - """ - prod, other = make_basic(env, config, make_repo) - branch_c = env['runbot_merge.branch'].search([('name', '=', 'c')]) - branch_c.fp_target = False - - with prod: - [c] = prod.make_commits('a', Commit('c', tree={'0': '0'}), ref='heads/branch0') - pr = prod.make_pr(target='a', head='branch0') - prod.post_status(c, 'success', 'legal/cla') - prod.post_status(c, 'success', 'ci/runbot') - pr.post_comment('hansen r+', config['role_reviewer']['token']) - env.run_crons() - - assert env['runbot_merge.pull_requests'].search([]).limit_id == branch_c - - with prod: - prod.post_status('staging.a', 'success', 'legal/cla') - prod.post_status('staging.a', 'success', 'ci/runbot') - env.run_crons() - - p1, p2 = env['runbot_merge.pull_requests'].search([], order='number') - assert p1.number == pr.number - pr2 = prod.get_pr(p2.number) - - cs = pr2.comments - assert len(cs) == 2 - assert pr2.comments == [ - seen(env, pr2, users), - (users['user'], """\ -@%(user)s @%(reviewer)s this PR targets b and is the last of the forward-port chain. 
- -To merge the full chain, say -> @%(user)s r+ - -More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port -""" % users) - ] - def test_limit_after_merge(env, config, make_repo, users): - """ If attempting to set a limit (<up to>) on a PR which is merged - (already forward-ported or not), or is a forward-port PR, fwbot should - just feedback that it won't do it - """ prod, other = make_basic(env, config, make_repo) reviewer = config['role_reviewer']['token'] + branch_b = env['runbot_merge.branch'].search([('name', '=', 'b')]) branch_c = env['runbot_merge.branch'].search([('name', '=', 'c')]) - bot_name = env['runbot_merge.project'].search([]).fp_github_name with prod: [c] = prod.make_commits('a', Commit('c', tree={'0': '0'}), ref='heads/abranch') pr1 = prod.make_pr(target='a', head='abranch') @@ -234,20 +185,20 @@ def test_limit_after_merge(env, config, make_repo, users): env.run_crons() p1, p2 = env['runbot_merge.pull_requests'].search([], order='number') - assert p1.limit_id == p2.limit_id == branch_c, "check that limit is correctly set" + assert p1.limit_id == p2.limit_id == env['runbot_merge.branch'].browse(()) pr2 = prod.get_pr(p2.number) with prod: - pr1.post_comment(bot_name + ' up to b', reviewer) - pr2.post_comment(bot_name + ' up to b', reviewer) + pr1.post_comment('hansen up to b', reviewer) + pr2.post_comment('hansen up to b', reviewer) env.run_crons() - assert p1.limit_id == p2.limit_id == branch_c, \ - "check that limit was not updated" + assert p1.limit_id == p2.limit_id == branch_b assert pr1.comments == [ (users['reviewer'], "hansen r+"), seen(env, pr1, users), - (users['reviewer'], bot_name + ' up to b'), - (bot_name, "@%s forward-port limit can only be set before the PR is merged." % users['reviewer']), + (users['reviewer'], 'hansen up to b'), + (users['user'], "Forward-porting to 'b'."), + (users['user'], f"Forward-porting to 'b' (from {p2.display_name})."), ] assert pr2.comments == [ seen(env, pr2, users), @@ -256,12 +207,8 @@ This PR targets b and is part of the forward-port chain. Further PRs will be cre More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port """), - (users['reviewer'], bot_name + ' up to b'), - (bot_name, "@%s forward-port limit can only be set on an origin PR" - " (%s here) before it's merged and forward-ported." % ( - users['reviewer'], - p1.display_name, - )), + (users['reviewer'], 'hansen up to b'), + (users['user'], f"Forward-porting {p1.display_name} to 'b'."), ] # update pr2 to detach it from pr1 @@ -277,17 +224,272 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port assert p2.source_id == p1 with prod: - pr2.post_comment(bot_name + ' up to b', reviewer) + pr2.post_comment('hansen up to c', reviewer) env.run_crons() assert pr2.comments[4:] == [ - (bot_name, "@%s @%s this PR was modified / updated and has become a normal PR. " - "It should be merged the normal way (via @%s)" % ( - users['user'], users['reviewer'], - p2.repository.project_id.github_prefix - )), - (users['reviewer'], bot_name + ' up to b'), - (bot_name, f"@{users['reviewer']} forward-port limit can only be set on an origin PR " - f"({p1.display_name} here) before it's merged and forward-ported." - ), + (users['user'], f"@{users['user']} @{users['reviewer']} this PR was modified / updated and has become a normal PR. 
It must be merged directly."), + (users['reviewer'], 'hansen up to c'), + (users['user'], "Forward-porting to 'c'."), ] + with prod: + prod.post_status(p2.head, 'success', 'legal/cla') + prod.post_status(p2.head, 'success', 'ci/runbot') + pr2.post_comment('hansen r+', reviewer) + env.run_crons() + with prod: + prod.post_status('staging.b', 'success', 'legal/cla') + prod.post_status('staging.b', 'success', 'ci/runbot') + env.run_crons() + + _, _, p3 = env['runbot_merge.pull_requests'].search([], order='number') + assert p3 + pr3 = prod.get_pr(p3.number) + with prod: + pr3.post_comment("hansen up to c", reviewer) + env.run_crons() + assert pr3.comments == [ + seen(env, pr3, users), + (users['user'], f"""\ +@{users['user']} @{users['reviewer']} this PR targets c and is the last of the forward-port chain. + +To merge the full chain, use +> @hansen r+ + +More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port +"""), + (users['reviewer'], "hansen up to c"), + (users['user'], f"Forward-porting {p2.display_name} to 'c'."), + ] + # 7 of previous check, plus r+ + assert pr2.comments[8:] == [ + (users['user'], f"Forward-porting to 'c' (from {p3.display_name}).") + ] + + + +@pytest.mark.parametrize("update_from", [ + pytest.param(lambda source: [('id', '=', source)], id='source'), + pytest.param(lambda source: [('source_id', '=', source), ('target', '=', '2')], id='child'), + pytest.param(lambda source: [('source_id', '=', source), ('target', '=', '3')], id='root'), + pytest.param(lambda source: [('source_id', '=', source), ('target', '=', '4')], id='parent'), + pytest.param(lambda source: [('source_id', '=', source), ('target', '=', '5')], id='current'), + # pytest.param(id='tip'), # doesn't exist +]) +@pytest.mark.parametrize("limit", range(1, 6+1)) +def test_post_merge( + env, post_merge, users, config, branches, + update_from: callable, + limit: int, +): + PRs = env['runbot_merge.pull_requests'] + project, prod, _ = post_merge + reviewer = config['role_reviewer']['token'] + + # fetch source PR + [source] = PRs.search([('source_id', '=', False)]) + + # validate the forward ports for "child", "root", and "parent" so "current" + # exists and we have one more target + for branch in map(str, range(2, 4+1)): + setci(source=source, repo=prod, target=branch) + env.run_crons() + # update 3 to make it into a root + root = PRs.search([('source_id', '=', source.id), ('target.name', '=', '3')]) + root.write({'parent_id': False, 'detach_reason': 'testing'}) + # send detach messages so they're not part of the limit stuff batch + env.run_crons() + + # cheat: we know PR numbers are assigned sequentially + prs = list(map(prod.get_pr, range(1, 6))) + before = {p.number: len(p.comments) for p in prs} + + from_id = PRs.search(update_from(source.id)) + from_ = prod.get_pr(from_id.number) + with prod: + from_.post_comment(f'hansen up to {limit}', reviewer) + env.run_crons() + + # there should always be a comment on the source and root indicating how + # far we port + # the PR we post on should have a comment indicating the correction + current_id = PRs.search([('number', '=', '5')]) + actual_limit = max(limit, 5) + for p in prs: + # case for the PR on which we posted the comment + if p.number == from_.number: + root_opt = '' if p.number == root.number else f' {root.display_name}' + trailer = '' if actual_limit == limit else f" (instead of the requested '{limit}' because {current_id.display_name} already exists)" + assert p.comments[before[p.number] + 1:] == [ + (users['user'], f"Forward-porting{root_opt} to 
'{actual_limit}'{trailer}.") + ] + # case for reference PRs source and root (which get their own notifications) + elif p.number in (source.number, root.number): + assert p.comments[before[p.number]:] == [ + (users['user'], f"Forward-porting to '{actual_limit}' (from {from_id.display_name}).") + ] + +@pytest.mark.parametrize('mode', [ + None, + # last forward port should fail ci, and only be validated after target bump + 'failbump', + # last forward port should fail ci, then be validated, then target bump + 'failsucceed', + # last forward port should be merged before bump + 'mergetip', + # every forward port should be merged before bump + 'mergeall', +]) +def test_resume_fw(env, post_merge, users, config, branches, mode): + """Singleton version of test_post_merge: completes the forward porting + including validation then tries to increase the limit, which should resume + forward porting + """ + + PRs = env['runbot_merge.pull_requests'] + project, prod, _ = post_merge + reviewer = config['role_reviewer']['token'] + + # fetch source PR + [source] = PRs.search([('source_id', '=', False)]) + with prod: + prod.get_pr(source.number).post_comment('hansen up to 5', reviewer) + # validate the forward ports for "child", "root", and "parent" so "current" + # exists and we have one more target + for branch in map(str, range(2, 5+1)): + setci( + source=source, repo=prod, target=branch, + status='failure' if branch == '5' and mode in ('failbump', 'failsucceed') else 'success' + ) + env.run_crons() + # cheat: we know PR numbers are assigned sequentially + prs = list(map(prod.get_pr, range(1, 6))) + before = {p.number: len(p.comments) for p in prs} + + if mode == 'failsucceed': + setci(source=source, repo=prod, target=5) + # sees the success, limit is still 5, considers the porting finished + env.run_crons() + + if mode and mode.startswith('merge'): + numbers = range(5 if mode == 'mergetip' else 2, 5 + 1) + with prod: + for number in numbers: + prod.get_pr(number).post_comment('hansen r+', reviewer) + env.run_crons() + with prod: + for target in numbers: + pr = PRs.search([('target.name', '=', str(target))]) + prod.post_status(f'staging.{target}', 'success') + env.run_crons() + for number in numbers: + assert PRs.search([('number', '=', number)]).state == 'merged' + + from_ = prod.get_pr(source.number) + with prod: + from_.post_comment('hansen up to 6', reviewer) + env.run_crons() + + if mode == 'failbump': + setci(source=source, repo=prod, target=5) + # setci moved the PR from opened to validated, so *now* it can be + # forward-ported, but that still needs to actually happen + env.run_crons() + + # since PR5 CI succeeded and we've increased the limit there should be a + # new PR + assert PRs.search([('source_id', '=', source.id), ('target.name', '=', 6)]) + pr5_id = PRs.search([('source_id', '=', source.id), ('target.name', '=', 5)]) + if mode == 'failbump': + # because the initial forward porting was never finished as the PR CI + # failed until *after* we bumped the limit, so it's not *resuming* per se. + assert prs[0].comments[before[1]+1:] == [ + (users['user'], f"Forward-porting to '6'.") + ] + else: + assert prs[0].comments[before[1]+1:] == [ + (users['user'], f"Forward-porting to '6', resuming forward-port stopped at {pr5_id.display_name}.") + ] + +def setci(*, source, repo, target, status='success'): + """Validates (CI success) the descendant of ``source`` targeting ``target`` + in ``repo``. 
+ """ + pr = source.search([('source_id', '=', source.id), ('target.name', '=', str(target))]) + assert pr, f"could not find forward port of {source.display_name} to {target}" + with repo: + repo.post_status(pr.head, status) + + +@pytest.fixture(scope='session') +def branches(): + """Need enough branches to make space for: + + - a source + - an ancestor (before and separated from the root, but not the source) + - a root (break in the parent chain) + - a parent (between "current" and root) + - "current" + - the tip branch + """ + return range(1, 6 + 1) + +@pytest.fixture +def post_merge(env, config, users, make_repo, branches): + """Create a setup for the post-merge limits test which is both simpler and + more complicated than the standard test setup(s): it doesn't need more + variety in code, but it needs a lot more "depth" in terms of number of + branches it supports. Branches are fixture-ed to make it easier to share + between this fixture and the actual test. + + All the branches are set to the same commit because that basically + shouldn't matter. + """ + prod = make_repo("post-merge-test") + with prod: + [c] = prod.make_commits(None, Commit('base', tree={'f': ''})) + for i in branches: + prod.make_ref(f'heads/{i}', c) + dev = prod.fork() + + proj = env['runbot_merge.project'].create({ + 'name': prod.name, + 'github_token': config['github']['token'], + 'github_prefix': 'hansen', + 'fp_github_token': config['github']['token'], + 'fp_github_name': 'herbert', + 'branch_ids': [ + (0, 0, {'name': str(i), 'sequence': 1000 - (i * 10)}) + for i in branches + ], + 'repo_ids': [ + (0, 0, { + 'name': prod.name, + 'required_statuses': 'default', + 'fp_remote_target': dev.name, + }) + ] + }) + env['runbot_merge.events_sources'].create({'repository': prod.name}) + + env['res.partner'].search([ + ('github_login', '=', config['role_reviewer']['user']) + ]).write({ + 'review_rights': [(0, 0, {'repository_id': proj.repo_ids.id, 'review': True})] + }) + + reviewer = config['role_reviewer']['token'] + # merge the source PR + source_target = str(branches[0]) + with prod: + [c] = prod.make_commits(source_target, Commit('my pr', tree={'x': ''}), ref='heads/mypr') + pr1 = prod.make_pr(target=source_target, head=c, title="a title") + + prod.post_status(c, 'success') + pr1.post_comment('hansen r+', reviewer) + env.run_crons() + with prod: + prod.post_status(f'staging.{source_target}', 'success') + env.run_crons() + + return proj, prod, dev diff --git a/forwardport/tests/test_overrides.py b/forwardport/tests/test_overrides.py index e8a18d35..e41d2409 100644 --- a/forwardport/tests/test_overrides.py +++ b/forwardport/tests/test_overrides.py @@ -12,39 +12,45 @@ def test_override_inherited(env, config, make_repo, users): """ repo, other = make_basic(env, config, make_repo) project = env['runbot_merge.project'].search([]) + project.repo_ids.status_ids = [(5, 0, 0), (0, 0, {'context': 'default'})] env['res.partner'].search([('github_login', '=', users['reviewer'])])\ .write({'override_rights': [(0, 0, { 'repository_id': project.repo_ids.id, - 'context': 'ci/runbot', + 'context': 'default', })]}) with repo: - repo.make_commits('a', Commit('C', tree={'a': '0'}), ref='heads/change') + repo.make_commits('a', Commit('pr 1', tree={'a': '0'}), ref='heads/change') pr = repo.make_pr(target='a', head='change') - repo.post_status('change', 'success', 'legal/cla') - pr.post_comment('hansen r+ override=ci/runbot', config['role_reviewer']['token']) + pr.post_comment('hansen r+ override=default', config['role_reviewer']['token']) 
env.run_crons() original = env['runbot_merge.pull_requests'].search([('repository.name', '=', repo.name), ('number', '=', pr.number)]) assert original.state == 'ready' + assert not original.limit_id with repo: - repo.post_status('staging.a', 'success', 'legal/cla') - repo.post_status('staging.a', 'success', 'ci/runbot') + repo.post_status('staging.a', 'success') env.run_crons() - pr0_id, pr1_id = env['runbot_merge.pull_requests'].search([], order='number') + pr0_id, pr1_id, pr2_id = env['runbot_merge.pull_requests'].search([], order='number') assert pr0_id == original - assert pr1_id.parent_id, pr0_id + assert pr0_id.target.name == 'a' - with repo: - repo.post_status(pr1_id.head, 'success', 'legal/cla') - env.run_crons() + assert pr1_id.parent_id == pr0_id + assert pr1_id.number == 2 + assert pr1_id.target.name == 'b' assert pr1_id.state == 'validated' - assert statuses(pr1_id) == {'ci/runbot': 'success', 'legal/cla': 'success'} + assert statuses(pr1_id) == {'default': 'success'} + + assert pr2_id.parent_id == pr1_id + assert pr2_id.target.name == 'c' + assert pr2_id.state == 'validated' + assert statuses(pr2_id) == {'default': 'success'} # now we edit the child PR - pr_repo, pr_ref = repo.get_pr(pr1_id.number).branch + pr1 = repo.get_pr(pr1_id.number) + pr_repo, pr_ref = pr1.branch with pr_repo: pr_repo.make_commits( pr1_id.target.name, @@ -56,6 +62,12 @@ def test_override_inherited(env, config, make_repo, users): assert pr1_id.state == 'opened' assert not pr1_id.parent_id assert statuses(pr1_id) == {}, "should not have any status left" + assert statuses(pr2_id) == {} + + with repo: + pr1.post_comment('hansen override=default', config['role_reviewer']['token']) + assert statuses(pr1_id) == {'default': 'success'} + assert statuses(pr2_id) == {'default': 'success'} def test_override_combination(env, config, make_repo, users): """ A forwardport should inherit its parents' overrides, until it's edited. 
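Aside, for orientation (not part of the patch): the test_overrides.py hunk above asserts that a status override granted via `override=default` on the source PR is inherited by every forward-port, and is dropped once a forward-port is edited. Below is a minimal sketch of that flow, assuming the same harness fixtures (`env`, `config`, `make_repo`, `users`) and only helpers visible in this diff (`make_basic`, `Commit`, `statuses`); the function name is illustrative.

def sketch_override_inheritance(env, config, make_repo, users):
    repo, other = make_basic(env, config, make_repo)
    project = env['runbot_merge.project'].search([])
    # single required status 'default', as configured in test_override_inherited
    project.repo_ids.status_ids = [(5, 0, 0), (0, 0, {'context': 'default'})]
    # grant the reviewer the right to override that status
    env['res.partner'].search([('github_login', '=', users['reviewer'])]).write({
        'override_rights': [(0, 0, {
            'repository_id': project.repo_ids.id,
            'context': 'default',
        })],
    })
    with repo:
        repo.make_commits('a', Commit('pr 1', tree={'a': '0'}), ref='heads/change')
        pr = repo.make_pr(target='a', head='change')
        # no status is ever posted on the PR: the override stands in for CI
        pr.post_comment('hansen r+ override=default', config['role_reviewer']['token'])
    env.run_crons()
    with repo:
        repo.post_status('staging.a', 'success')
    env.run_crons()
    # the forward-ports to b and c are created with the override inherited,
    # i.e. statuses(fp_id) == {'default': 'success'} with no real CI run;
    # editing a forward-port detaches it and clears the inherited override
    # for it and its descendants, until an explicit `override=default`
    # comment is posted on the forward-port itself.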
diff --git a/forwardport/tests/test_simple.py b/forwardport/tests/test_simple.py index ef31c495..634a25c0 100644 --- a/forwardport/tests/test_simple.py +++ b/forwardport/tests/test_simple.py @@ -6,7 +6,7 @@ from datetime import datetime, timedelta import pytest -from utils import seen, Commit, make_basic, REF_PATTERN, MESSAGE_TEMPLATE, validate_all, part_of +from utils import seen, Commit, make_basic, REF_PATTERN, MESSAGE_TEMPLATE, validate_all, part_of, to_pr, matches FMT = '%Y-%m-%d %H:%M:%S' FAKE_PREV_WEEK = (datetime.now() + timedelta(days=1)).strftime(FMT) @@ -35,7 +35,6 @@ def test_straightforward_flow(env, config, make_repo, users): other_user = config['role_other'] other_user_repo = prod.fork(token=other_user['token']) - project = env['runbot_merge.project'].search([]) b_head = prod.commit('b') c_head = prod.commit('c') with prod, other_user_repo: @@ -109,7 +108,7 @@ def test_straightforward_flow(env, config, make_repo, users): assert c.author['name'] == other_user['user'], "author should still be original's probably" assert c.committer['name'] == other_user['user'], "committer should also still be the original's, really" - assert pr1.ping() == "@%s @%s " % ( + assert pr1.ping == "@%s @%s " % ( config['role_other']['user'], config['role_reviewer']['user'], ), "ping of forward-port PR should include author and reviewer of source" @@ -124,7 +123,7 @@ def test_straightforward_flow(env, config, make_repo, users): prod.post_status(pr1.head, 'success', 'legal/cla') env.run_crons() - env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK}) + env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK}) pr0_, pr1_, pr2 = env['runbot_merge.pull_requests'].search([], order='number') @@ -132,11 +131,15 @@ def test_straightforward_flow(env, config, make_repo, users): (users['reviewer'], 'hansen r+ rebase-ff'), seen(env, pr, users), (users['user'], 'Merge method set to rebase and fast-forward.'), - (users['user'], '@%s @%s this pull request has forward-port PRs awaiting action (not merged or closed):\n%s' % ( - users['other'], users['reviewer'], - '\n- '.join((pr1 | pr2).mapped('display_name')) - )), ] + pr1_remote = prod.get_pr(pr1.number) + assert pr1_remote.comments == [ + seen(env, pr1_remote, users), + (users['user'], """\ +This PR targets b and is part of the forward-port chain. Further PRs will be created up to c. + +More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port +""")] assert pr0_ == pr0 assert pr1_ == pr1 @@ -160,21 +163,25 @@ def test_straightforward_flow(env, config, make_repo, users): @%s @%s this PR targets c and is the last of the forward-port chain containing: * %s -To merge the full chain, say -> @%s r+ +To merge the full chain, use +> @hansen r+ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port """ % ( users['other'], users['reviewer'], pr1.display_name, - project.fp_github_name )), + (users['user'], "@%s @%s this forward port of %s is awaiting action (not merged or closed)." 
% ( + users['other'], + users['reviewer'], + pr0.display_name, + )) ] with prod: prod.post_status(pr2.head, 'success', 'ci/runbot') prod.post_status(pr2.head, 'success', 'legal/cla') - pr2_remote.post_comment('%s r+' % project.fp_github_name, config['role_reviewer']['token']) + pr2_remote.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() @@ -232,7 +239,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port assert other_user_repo.get_ref(pr.ref) == p_1 # should have deleted all PR branches - pr1_ref = prod.get_pr(pr1.number).ref + pr1_ref = pr1_remote.ref with pytest.raises(AssertionError, match='Not Found'): other.get_ref(pr1_ref) @@ -315,36 +322,69 @@ def test_empty(env, config, make_repo, users): assert env['runbot_merge.pull_requests'].search([], order='number') == prs # change FP token to see if the feedback comes from the proper user project = env['runbot_merge.project'].search([]) - project.fp_github_token = config['role_other']['token'] + project.write({ + 'fp_github_name': False, + 'fp_github_token': config['role_other']['token'], + }) assert project.fp_github_name == users['other'] # check reminder - env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK}) - env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK}) + env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK}) + env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK}) awaiting = ( users['other'], - '@%s @%s this pull request has forward-port PRs awaiting action (not merged or closed):\n%s' % ( + '@%s @%s this forward port of %s is awaiting action (not merged or closed).' % ( users['user'], users['reviewer'], - fail_id.display_name + pr1_id.display_name ) ) + conflict = (users['user'], matches( + f"""@{users['user']} @{users['reviewer']} cherrypicking of pull request {pr1_id.display_name} failed. + +stdout: +``` +$$ +``` + +stderr: +``` +$$ +``` + +Either perform the forward-port manually (and push to this branch, proceeding as usual) or close this PR (maybe?). + +In the former case, you may want to edit this PR message as well. + +:warning: after resolving this conflict, you will need to merge it via @{project.github_prefix}. 
+ +More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port +""")) assert pr1.comments == [ (users['reviewer'], 'hansen r+'), seen(env, pr1, users), + ] + fail_pr = prod.get_pr(fail_id.number) + assert fail_pr.comments == [ + seen(env, fail_pr, users), + conflict, awaiting, awaiting, - ], "each cron run should trigger a new message on the ancestor" + ], "each cron run should trigger a new message" # check that this stops if we close the PR with prod: - prod.get_pr(fail_id.number).close() - env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK}) + fail_pr.close() + env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK}) assert pr1.comments == [ (users['reviewer'], 'hansen r+'), seen(env, pr1, users), - awaiting, - awaiting, ] + assert fail_pr.comments == [ + seen(env, fail_pr, users), + conflict, + awaiting, + awaiting, + ], "each cron run should trigger a new message" def test_partially_empty(env, config, make_repo): """ Check what happens when only some commits of the PR are now empty @@ -489,7 +529,7 @@ def test_access_rights(env, config, make_repo, users, author, reviewer, delegate prod.post_status(pr2.head, 'success', 'ci/runbot') prod.post_status(pr2.head, 'success', 'legal/cla') prod.get_pr(pr2.number).post_comment( - '%s r+' % project.fp_github_name, + 'hansen r+', token=config['role_' + reviewer]['token'] ) env.run_crons() @@ -513,6 +553,69 @@ def signoff(conf, message): return signoff raise AssertionError("Failed to find signoff by %s in %s" % (conf, message)) +def test_disapproval(env, config, make_repo, users): + """The author of a source PR should be able to unapprove the forward port in + case they approved it then noticed an issue or something. + """ + # region setup + prod, _ = make_basic(env, config, make_repo, statuses='default') + env['res.partner'].create({ + 'name': users['other'], + 'github_login': users['other'], + 'email': 'other@example.org', + }) + + author_token = config['role_other']['token'] + fork = prod.fork(token=author_token) + with prod, fork: + [c] = fork.make_commits('a', Commit('c_0', tree={'y': '0'}), ref='heads/accessrights') + pr0 = prod.make_pr( + target='a', title='my change', + head=users['other'] + ':accessrights', + token=author_token, + ) + prod.post_status(c, 'success') + pr0.post_comment('hansen r+', token=config['role_reviewer']['token']) + env.run_crons() + + with prod: + prod.post_status('staging.a', 'success') + env.run_crons() + + pr0_id, pr1_id = env['runbot_merge.pull_requests'].search([], order='number') + assert pr1_id.source_id == pr0_id + pr1 = prod.get_pr(pr1_id.number) + assert pr0_id.state == 'merged' + with prod: + prod.post_status(pr1_id.head, 'success') + env.run_crons() + # endregion + + _, _, pr2_id = env['runbot_merge.pull_requests'].search([], order='number') + pr2 = prod.get_pr(pr2_id.number) + with prod: + prod.post_status(pr2_id.head, 'success') + pr2.post_comment('hansen r+', token=config['role_other']['token']) + # no point creating staging for our needs, just propagate statuses + env.run_crons(None) + assert pr1_id.state == 'ready' + assert pr2_id.state == 'ready' + + # oh no, pr1 has an error! 
+ with prod: + pr1.post_comment('hansen r-', token=config['role_other']['token']) + env.run_crons(None) + assert pr1_id.state == 'validated', "pr1 should not be approved anymore" + assert pr2_id.state == 'ready', "pr2 should not be affected" + + assert pr1.comments == [ + seen(env, pr1, users), + (users['user'], 'This PR targets b and is part of the forward-port chain. Further PRs will be created up to c.\n\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n'), + (users['other'], "hansen r-"), + (users['user'], "Note that only this forward-port has been unapproved, " + "sibling forward ports may have to be unapproved " + "individually."), + ] def test_delegate_fw(env, config, make_repo, users): """If a user is delegated *on a forward port* they should be able to approve @@ -582,8 +685,8 @@ def test_delegate_fw(env, config, make_repo, users): seen(env, pr2, users), (users['user'], '''@{self_reviewer} @{reviewer} this PR targets c and is the last of the forward-port chain. -To merge the full chain, say -> @{user} r+ +To merge the full chain, use +> @hansen r+ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port '''.format_map(users)), @@ -626,7 +729,7 @@ def test_redundant_approval(env, config, make_repo, users): with prod: pr1.post_comment('hansen r+', config['role_reviewer']['token']) with prod: - pr2.post_comment(f'{project.fp_github_name} r+', config['role_reviewer']['token']) + pr2.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() assert pr1.comments == [ @@ -738,7 +841,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port # ok main1 PRs with main1: validate_all([main1], [pr1c.head]) - main1.get_pr(pr1c.number).post_comment('%s r+' % project.fp_github_name, config['role_reviewer']['token']) + main1.get_pr(pr1c.number).post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() # check that the main1 PRs are ready but blocked on the main2 PRs @@ -750,7 +853,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port # ok main2 PRs with main2: validate_all([main2], [pr2c.head]) - main2.get_pr(pr2c.number).post_comment('%s r+' % project.fp_github_name, config['role_reviewer']['token']) + main2.get_pr(pr2c.number).post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() env['runbot_merge.stagings'].search([]).mapped('target.display_name') @@ -796,7 +899,7 @@ class TestClosing: prod.post_status(pr1_id.head, 'success', 'legal/cla') prod.post_status(pr1_id.head, 'success', 'ci/runbot') env.run_crons() - env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron') + env.run_crons('forwardport.reminder') assert env['runbot_merge.pull_requests'].search([], order='number') == pr0_id | pr1_id,\ "closing the PR should suppress the FP sequence" @@ -858,27 +961,86 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port with prod: pr1.open() assert pr1_id.state == 'validated' - env.run_crons() - assert pr1.comments[-1] == ( - users['user'], - "@{} @{} this PR was closed then reopened. 
" - "It should be merged the normal way (via @{})".format( - users['user'], - users['reviewer'], - project.github_prefix, - ) - ) + assert not pr1_id.parent_id + assert not pr2_id.parent_id - with prod: - pr1.post_comment(f'{project.fp_github_name} r+', config['role_reviewer']['token']) + def test_close_disabled(self, env, make_repo, users, config): + """ If an fwport's target is disabled and its branch is closed, it + should not be notified (multiple times), also its descendant should not + be nodified if already merged, also there should not be recursive + notifications (odoo/odoo#145969, odoo/odoo#145984) + """ + repo, _ = make_basic(env, config, make_repo) + env['runbot_merge.repository'].search([]).required_statuses = 'default' + # prep: merge PR, create two forward ports + with repo: + [c1] = repo.make_commits('a', Commit('first', tree={'m': 'c1'})) + pr1 = repo.make_pr(title='title', body='body', target='a', head=c1) + pr1.post_comment('hansen r+', config['role_reviewer']['token']) + repo.post_status(c1, 'success') env.run_crons() - assert pr1.comments[-1] == ( - users['user'], - "@{} I can only do this on unmodified forward-port PRs, ask {}.".format( - users['reviewer'], - project.github_prefix, - ), - ) + + pr1_id = to_pr(env, pr1) + assert pr1_id.state == 'ready', pr1_id.blocked + + with repo: + repo.post_status('staging.a', 'success') + env.run_crons() + + pr1_id_, pr2_id = env['runbot_merge.pull_requests'].search([], order='number') + assert pr1_id_ == pr1_id + with repo: + repo.post_status(pr2_id.head, 'success') + env.run_crons() + + _, _, pr3_id = env['runbot_merge.pull_requests'].search([], order='number') + + # disable second branch + pr2_id.target.active = False + env.run_crons() + + pr2 = repo.get_pr(pr2_id.number) + assert pr2.comments == [ + seen(env, pr2, users), + (users['user'], "This PR targets b and is part of the forward-port chain. 
" + "Further PRs will be created up to c.\n\n" + "More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n"), + (users['user'], "@{user} @{reviewer} the target branch 'b' has been disabled, you may want to close this PR.".format_map( + users + )), + ] + pr3 = repo.get_pr(pr3_id.number) + assert pr3.comments == [ + seen(env, pr3, users), + (users['user'], """\ +@{user} @{reviewer} this PR targets c and is the last of the forward-port chain containing: +* {pr2_id.display_name} + +To merge the full chain, use +> @hansen r+ + +More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port +""".format(pr2_id=pr2_id, **users)), + ] + + # some time later, notice PR3 is open and merge it + with repo: + pr3.post_comment('hansen r+', config['role_reviewer']['token']) + repo.post_status(pr3.head, 'success') + env.run_crons() + with repo: + repo.post_status('staging.c', 'success') + env.run_crons() + + assert pr3_id.status == 'success' + + # even later, notice PR2 is still open but not mergeable anymore + with repo: + pr2.close() + env.run_crons() + + assert pr2.comments[3:] == [] + assert pr3.comments[2:] == [(users['reviewer'], "hansen r+")] class TestBranchDeletion: def test_delete_normal(self, env, config, make_repo): @@ -981,50 +1143,44 @@ class TestRecognizeCommands: ('number', '=', pr.number), ]) + # FIXME: remove / merge into mergebot tests def test_botname_casing(self, env, config, make_repo): """ Test that the botname is case-insensitive as people might write bot names capitalised or titlecased or uppercased or whatever """ repo, pr, pr_id = self.make_pr(env, config, make_repo) assert pr_id.state == 'opened' - botname = env['runbot_merge.project'].search([]).fp_github_name [a] = env['runbot_merge.branch'].search([ ('name', '=', 'a') ]) - [c] = env['runbot_merge.branch'].search([ - ('name', '=', 'c') - ]) names = [ - botname, - botname.upper(), - botname.capitalize(), - sPeNgBaB(botname), + "hansen", + "HANSEN", + "Hansen", + sPeNgBaB("hansen"), ] for n in names: - assert pr_id.limit_id == c + assert not pr_id.limit_id with repo: - pr.post_comment('@%s up to a' % n, config['role_reviewer']['token']) + pr.post_comment(f'@{n} up to a', config['role_reviewer']['token']) assert pr_id.limit_id == a # reset state - pr_id.write({'limit_id': c.id}) + pr_id.limit_id = False + # FIXME: remove / merge into mergebot tests @pytest.mark.parametrize('indent', ['', '\N{SPACE}', '\N{SPACE}'*4, '\N{TAB}']) def test_botname_indented(self, env, config, make_repo, indent): """ matching botname should ignore leading whitespaces """ repo, pr, pr_id = self.make_pr(env, config, make_repo) assert pr_id.state == 'opened' - botname = env['runbot_merge.project'].search([]).fp_github_name [a] = env['runbot_merge.branch'].search([ ('name', '=', 'a') ]) - [c] = env['runbot_merge.branch'].search([ - ('name', '=', 'c') - ]) - assert pr_id.limit_id == c + assert not pr_id.limit_id with repo: - pr.post_comment('%s@%s up to a' % (indent, botname), config['role_reviewer']['token']) + pr.post_comment(f'{indent}@hansen up to a', config['role_reviewer']['token']) assert pr_id.limit_id == a diff --git a/forwardport/tests/test_updates.py b/forwardport/tests/test_updates.py index f8af94d8..b13cd16a 100644 --- a/forwardport/tests/test_updates.py +++ b/forwardport/tests/test_updates.py @@ -3,14 +3,14 @@ Test cases for updating PRs during after the forward-porting process after the initial merge has succeeded (and forward-porting has started) """ import re -import sys import pytest -from utils import seen, re_matches, 
Commit, make_basic, to_pr +from utils import seen, matches, Commit, make_basic, to_pr -def test_update_pr(env, config, make_repo, users): +@pytest.mark.parametrize("merge_parent", [False, True]) +def test_update_pr(env, config, make_repo, users, merge_parent) -> None: """ Even for successful cherrypicks, it's possible that e.g. CI doesn't pass or the reviewer finds out they need to update the code. @@ -18,6 +18,14 @@ def test_update_pr(env, config, make_repo, users): only this one and its dependent should be updated? """ prod, _ = make_basic(env, config, make_repo) + # create a branch d from c so we can have 3 forward-port PRs, not just 2, + # for additional checks + env['runbot_merge.project'].search([]).write({ + 'branch_ids': [(0, 0, {'name': 'd', 'sequence': 40})] + }) + with prod: + prod.make_commits('c', Commit('1111', tree={'i': 'a'}), ref='heads/d') + with prod: [p_1] = prod.make_commits( 'a', @@ -25,11 +33,22 @@ def test_update_pr(env, config, make_repo, users): ref='heads/hugechange' ) pr = prod.make_pr(target='a', head='hugechange') - prod.post_status(p_1, 'success', 'legal/cla') - prod.post_status(p_1, 'success', 'ci/runbot') pr.post_comment('hansen r+', config['role_reviewer']['token']) + prod.post_status(p_1, 'success', 'legal/cla') + prod.post_status(p_1, 'failure', 'ci/runbot') env.run_crons() + + assert pr.comments == [ + (users['reviewer'], 'hansen r+'), + seen(env, pr, users), + (users['user'], "@{user} @{reviewer} 'ci/runbot' failed on this reviewed PR.".format_map(users)), + ] + + with prod: + prod.post_status(p_1, 'success', 'ci/runbot') + env.run_crons() + with prod: prod.post_status('staging.a', 'success', 'legal/cla') prod.post_status('staging.a', 'success', 'ci/runbot') @@ -40,7 +59,7 @@ def test_update_pr(env, config, make_repo, users): pr0_id, pr1_id = env['runbot_merge.pull_requests'].search([], order='number') fp_intermediate = (users['user'], '''\ -This PR targets b and is part of the forward-port chain. Further PRs will be created up to c. +This PR targets b and is part of the forward-port chain. Further PRs will be created up to d. More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port ''') @@ -100,15 +119,6 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port assert pr1_id.head == new_c != pr1_head, "the FP PR should be updated" assert not pr1_id.parent_id, "the FP PR should be detached from the original" - assert pr1_remote.comments == [ - seen(env, pr1_remote, users), - fp_intermediate, ci_warning, ci_warning, - (users['user'], "@%s @%s this PR was modified / updated and has become a normal PR. " - "It should be merged the normal way (via @%s)" % ( - users['user'], users['reviewer'], - pr1_id.repository.project_id.github_prefix - )), - ], "users should be warned that the PR has become non-FP" # NOTE: should the followup PR wait for pr1 CI or not? 
assert pr2_id.head != pr2_head assert pr2_id.parent_id == pr1_id, "the followup PR should still be linked" @@ -125,6 +135,69 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port 'x': '5' }, "the followup FP should also have the update" + with prod: + prod.post_status(pr2_id.head, 'success', 'ci/runbot') + prod.post_status(pr2_id.head, 'success', 'legal/cla') + env.run_crons() + + pr2 = prod.get_pr(pr2_id.number) + if merge_parent: + with prod: + pr2.post_comment('hansen r+', config['role_reviewer']['token']) + env.run_crons() + with prod: + prod.post_status('staging.c', 'success', 'ci/runbot') + prod.post_status('staging.c', 'success', 'legal/cla') + env.run_crons() + assert pr2_id.state == 'merged' + + _0, _1, _2, pr3_id = env['runbot_merge.pull_requests'].search([], order='number') + assert pr3_id.parent_id == pr2_id + # don't bother updating heads (?) + pr3_id.write({'parent_id': False, 'detach_reason': "testing"}) + # pump feedback messages + env.run_crons() + + pr3 = prod.get_pr(pr3_id.number) + assert pr3.comments == [ + seen(env, pr3, users), + (users['user'], f"""\ +@{users['user']} @{users['reviewer']} this PR targets d and is the last of the forward-port chain containing: +* {pr2_id.display_name} + +To merge the full chain, use +> @hansen r+ + +More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port +"""), + (users['user'], f"@{users['user']} @{users['reviewer']} this PR was " + f"modified / updated and has become a normal PR. It " + f"must be merged directly." + ) + ] + + assert pr2.comments[:2] == [ + seen(env, pr2, users), + (users['user'], """\ +This PR targets c and is part of the forward-port chain. Further PRs will be created up to d. + +More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port +"""), + ] + + if merge_parent: + assert pr2.comments[2:] == [ + (users['reviewer'], "hansen r+"), + ] + else: + assert pr2.comments[2:] == [ + (users['user'], f"@{users['user']} @{users['reviewer']} child PR " + f"{pr3_id.display_name} was modified / updated and has " + f"become a normal PR. 
This PR (and any of its parents) " + f"will need to be merged independently as approvals " + f"won't cross."), + ] + def test_update_merged(env, make_repo, config, users): """ Strange things happen when an FP gets closed / merged but then its parent is modified and the forwardport tries to update the (now merged) @@ -151,9 +224,7 @@ def test_update_merged(env, make_repo, config, users): with prod: prod.make_ref('heads/d', prod.commit('c').id) env['runbot_merge.project'].search([]).write({ - 'branch_ids': [(0, 0, { - 'name': 'd', 'sequence': 40, 'fp_target': True, - })] + 'branch_ids': [(0, 0, {'name': 'd', 'sequence': 40})] }) with prod: @@ -250,11 +321,12 @@ def test_duplicate_fw(env, make_repo, setreviewers, config, users): 'github_token': config['github']['token'], 'github_prefix': 'hansen', 'fp_github_token': config['github']['token'], + 'fp_github_name': 'herbert', 'branch_ids': [ - (0, 0, {'name': 'master', 'sequence': 0, 'fp_target': True}), - (0, 0, {'name': 'v3', 'sequence': 1, 'fp_target': True}), - (0, 0, {'name': 'v2', 'sequence': 2, 'fp_target': True}), - (0, 0, {'name': 'v1', 'sequence': 3, 'fp_target': True}), + (0, 0, {'name': 'master', 'sequence': 0}), + (0, 0, {'name': 'v3', 'sequence': 1}), + (0, 0, {'name': 'v2', 'sequence': 2}), + (0, 0, {'name': 'v1', 'sequence': 3}), ], 'repo_ids': [ (0, 0, { @@ -265,6 +337,7 @@ def test_duplicate_fw(env, make_repo, setreviewers, config, users): ] }) setreviewers(*proj.repo_ids) + env['runbot_merge.events_sources'].create({'repository': repo.name}) # create a PR in v1, merge it, then create all 3 ports with repo: @@ -304,7 +377,7 @@ def test_duplicate_fw(env, make_repo, setreviewers, config, users): with repo: repo.make_commits('v2', Commit('c0', tree={'z': 'b'}), ref=prv2.ref, make=False) env.run_crons() - assert pr_ids.mapped('state') == ['merged', 'opened', 'validated', 'validated'] + assert pr_ids.mapped('state') == ['merged', 'opened', 'opened', 'opened'] assert repo.read_tree(repo.commit(prv2_id.head)) == {'f': 'c', 'h': 'a', 'z': 'b'} assert repo.read_tree(repo.commit(prv3_id.head)) == {'f': 'd', 'i': 'a', 'z': 'b'} assert repo.read_tree(repo.commit(prmaster_id.head)) == {'f': 'e', 'z': 'b'} @@ -372,12 +445,12 @@ def test_subsequent_conflict(env, make_repo, config, users): assert repo.read_tree(repo.commit(pr3_id.head)) == { 'f': 'c', 'g': 'a', - 'h': re_matches(r'''<<<\x3c<<< HEAD + 'h': matches('''<<<\x3c<<< $$ a -|||||||| parent of [\da-f]{7,}.* +||||||| $$ ======= conflict! ->>>\x3e>>> [\da-f]{7,}.* +>>>\x3e>>> $$ '''), 'x': '0', } @@ -397,18 +470,13 @@ conflict! # 1. link to status page # 2. forward-port chain thing assert repo.get_pr(pr3_id.number).comments[2:] == [ - (users['user'], re_matches(f'''\ + (users['user'], f'''\ @{users['user']} @{users['reviewer']} WARNING: the update of {pr2_id.display_name} to {pr2_id.head} has caused a \ conflict in this pull request, data may have been lost. stdout: -```.*? -CONFLICT \\(add/add\\): Merge conflict in h.*? ``` - -stderr: -``` -\\d{{2}}:\\d{{2}}:\\d{{2}}.\\d+ .* {pr2_id.head} -error: could not apply [0-9a-f]+\\.\\.\\. 
newfiles -''', re.DOTALL)) +Auto-merging h +CONFLICT (add/add): Merge conflict in h +```'''), ] diff --git a/forwardport/tests/test_weird.py b/forwardport/tests/test_weird.py index ab077c05..19feda24 100644 --- a/forwardport/tests/test_weird.py +++ b/forwardport/tests/test_weird.py @@ -1,85 +1,17 @@ # -*- coding: utf-8 -*- -from datetime import datetime +from datetime import datetime, timedelta import pytest -from utils import seen, Commit, to_pr +from utils import seen, Commit, to_pr, make_basic -def make_basic(env, config, make_repo, *, fp_token, fp_remote): - """ Creates a basic repo with 3 forking branches - - 0 -- 1 -- 2 -- 3 -- 4 : a - | - `-- 11 -- 22 : b - | - `-- 111 : c - each branch just adds and modifies a file (resp. f, g and h) through the - contents sequence a b c d e - """ - Projects = env['runbot_merge.project'] - project = Projects.search([('name', '=', 'myproject')]) - if not project: - project = Projects.create({ - 'name': 'myproject', - 'github_token': config['github']['token'], - 'github_prefix': 'hansen', - 'fp_github_token': fp_token and config['github']['token'], - 'branch_ids': [ - (0, 0, {'name': 'a', 'sequence': 2, 'fp_target': True}), - (0, 0, {'name': 'b', 'sequence': 1, 'fp_target': True}), - (0, 0, {'name': 'c', 'sequence': 0, 'fp_target': True}), - ], - }) - - prod = make_repo('proj') - with prod: - a_0, a_1, a_2, a_3, a_4, = prod.make_commits( - None, - Commit("0", tree={'f': 'a'}), - Commit("1", tree={'f': 'b'}), - Commit("2", tree={'f': 'c'}), - Commit("3", tree={'f': 'd'}), - Commit("4", tree={'f': 'e'}), - ref='heads/a', - ) - b_1, b_2 = prod.make_commits( - a_2, - Commit('11', tree={'g': 'a'}), - Commit('22', tree={'g': 'b'}), - ref='heads/b', - ) - prod.make_commits( - b_1, - Commit('111', tree={'h': 'a'}), - ref='heads/c', - ) - other = prod.fork() - repo = env['runbot_merge.repository'].create({ - 'project_id': project.id, - 'name': prod.name, - 'required_statuses': 'legal/cla,ci/runbot', - 'fp_remote_target': fp_remote and other.name, - }) - env['res.partner'].search([ - ('github_login', '=', config['role_reviewer']['user']) - ]).write({ - 'review_rights': [(0, 0, {'repository_id': repo.id, 'review': True})] - }) - env['res.partner'].search([ - ('github_login', '=', config['role_self_reviewer']['user']) - ]).write({ - 'review_rights': [(0, 0, {'repository_id': repo.id, 'self_review': True})] - }) - - return project, prod, other - def test_no_token(env, config, make_repo): """ if there's no token on the repo, nothing should break though should log """ # create project configured with remotes on the repo but no token - proj, prod, _ = make_basic(env, config, make_repo, fp_token=False, fp_remote=True) + prod, _ = make_basic(env, config, make_repo, fp_token=False, fp_remote=True) with prod: prod.make_commits( @@ -109,8 +41,8 @@ def test_no_token(env, config, make_repo): "should not have created forward port" def test_remove_token(env, config, make_repo): - proj, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True) - proj.fp_github_token = False + prod, _ = make_basic(env, config, make_repo) + env['runbot_merge.project'].search([]).fp_github_token = False with prod: prod.make_commits( @@ -131,7 +63,7 @@ def test_remove_token(env, config, make_repo): "should not have created forward port" def test_no_target(env, config, make_repo): - proj, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=False) + prod, _ = make_basic(env, config, make_repo, fp_remote=False) with prod: prod.make_commits( @@ -152,7 +84,7 @@ def 
test_no_target(env, config, make_repo): "should not have created forward port" def test_failed_staging(env, config, make_repo): - proj, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True) + prod, _ = make_basic(env, config, make_repo) reviewer = config['role_reviewer']['token'] with prod: @@ -179,7 +111,7 @@ def test_failed_staging(env, config, make_repo): with prod: prod.post_status(pr3_id.head, 'success', 'legal/cla') prod.post_status(pr3_id.head, 'success', 'ci/runbot') - pr3.post_comment('%s r+' % proj.fp_github_name, reviewer) + pr3.post_comment('hansen r+', reviewer) env.run_crons() prod.commit('staging.c') @@ -190,19 +122,8 @@ def test_failed_staging(env, config, make_repo): prod.post_status('staging.c', 'failure', 'ci/runbot') env.run_crons() - pr3_head = env['runbot_merge.commit'].search([ - ('sha', '=', pr3_id.head), - ]) - assert len(pr3_head) == 1 - - assert not pr3_id.batch_id, "check that the PR indeed has no batch anymore" - assert not pr3_id.batch_ids.filtered(lambda b: b.active) - - assert len(env['runbot_merge.batch'].search([ - ('prs', 'in', pr3_id.id), - '|', ('active', '=', True), - ('active', '=', False), - ])) == 2, "check that there do exist batches" + pr3_head = env['runbot_merge.commit'].search([('sha', '=', pr3_id.head)]) + assert pr3_head # send a new status to the PR, as if somebody had rebuilt it or something with prod: @@ -212,6 +133,8 @@ def test_failed_staging(env, config, make_repo): assert pr3_head.to_check, "check that the commit was updated as to process" env.run_crons() assert not pr3_head.to_check, "check that the commit was processed" + assert pr3_id.state == 'ready' + assert pr3_id.staging_id class TestNotAllBranches: """ Check that forward-ports don't behave completely insanely when not all @@ -262,10 +185,11 @@ class TestNotAllBranches: 'github_token': config['github']['token'], 'github_prefix': 'hansen', 'fp_github_token': config['github']['token'], + 'fp_github_name': 'herbert', 'branch_ids': [ - (0, 0, {'name': 'a', 'sequence': 2, 'fp_target': True}), - (0, 0, {'name': 'b', 'sequence': 1, 'fp_target': True}), - (0, 0, {'name': 'c', 'sequence': 0, 'fp_target': True}), + (0, 0, {'name': 'a', 'sequence': 2}), + (0, 0, {'name': 'b', 'sequence': 1}), + (0, 0, {'name': 'c', 'sequence': 0}), ] }) repo_a = env['runbot_merge.repository'].create({ @@ -282,6 +206,7 @@ class TestNotAllBranches: 'branch_filter': '[("name", "in", ["a", "c"])]', }) setreviewers(repo_a, repo_b) + env['runbot_merge.events_sources'].create([{'repository': a.name}, {'repository': b.name}]) return project, a, a_dev, b, b_dev def test_single_first(self, env, repos, config): @@ -314,7 +239,7 @@ class TestNotAllBranches: with a: a.post_status(pr2.head, 'success', 'ci/runbot') a.get_pr(pr2.number).post_comment( - '%s r+' % project.fp_github_name, + 'hansen r+', config['role_reviewer']['token']) env.run_crons() assert pr1.staging_id @@ -353,7 +278,7 @@ class TestNotAllBranches: with b: b.post_status(pr1.head, 'success', 'ci/runbot') b.get_pr(pr1.number).post_comment( - '%s r+' % project.fp_github_name, + 'hansen r+', config['role_reviewer']['token']) env.run_crons() with a, b: @@ -401,7 +326,7 @@ class TestNotAllBranches: assert pr_a.comments == [ (users['reviewer'], 'hansen r+'), seen(env, pr_a, users), - (users['user'], "@%s @%s this pull request can not be forward ported:" + (users['user'], "@%s @%s this pull request can not be forward-ported:" " next branch is 'b' but linked pull request %s " "has a next branch 'c'." 
% ( users['user'], users['reviewer'], pr_b_id.display_name, @@ -410,7 +335,7 @@ assert pr_b.comments == [ (users['reviewer'], 'hansen r+'), seen(env, pr_b, users), - (users['user'], "@%s @%s this pull request can not be forward ported:" + (users['user'], "@%s @%s this pull request can not be forward-ported:" " next branch is 'c' but linked pull request %s " "has a next branch 'b'." % ( users['user'], users['reviewer'], pr_a_id.display_name, @@ -428,8 +353,9 @@ def test_new_intermediate_branch(env, config, make_repo): def validate(repo, commit): repo.post_status(commit, 'success', 'ci/runbot') repo.post_status(commit, 'success', 'legal/cla') - project, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True) - _, prod2, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True) + prod, _ = make_basic(env, config, make_repo) + prod2, _ = make_basic(env, config, make_repo) + project = env['runbot_merge.project'].search([]) assert len(project.repo_ids) == 2 original_c_tree = prod.read_tree(prod.commit('c')) @@ -472,6 +398,7 @@ def test_new_intermediate_branch(env, config, make_repo): with prod: validate(prod, pr0_fp_id.head) env.run_crons() + assert pr0_fp_id.state == 'validated' original0 = PRs.search([('parent_id', '=', pr0_fp_id.id)]) assert original0, "Could not find FP of PR0 to C" assert original0.target.name == 'c' @@ -514,11 +441,12 @@ def test_new_intermediate_branch(env, config, make_repo): env.run_crons() project.write({ 'branch_ids': [ - (0, False, {'name': 'new', 'sequence': 1, 'fp_target': True}), + (0, False, {'name': 'new', 'sequence': 1}), ] }) env.run_crons() + assert pr0_fp_id.state == 'validated' # created an intermediate PR for 0 and x desc0 = PRs.search([('source_id', '=', pr0_id.id)]) new0 = desc0 - pr0_fp_id - original0 @@ -574,7 +502,7 @@ def test_new_intermediate_branch(env, config, make_repo): with prod, prod2: for pr in fps.filtered(lambda p: p.target.name == 'c'): get_repo(pr).get_pr(pr.number).post_comment( - '%s r+' % project.fp_github_name, + 'hansen r+', config['role_reviewer']['token']) assert all(p.state == 'merged' for p in PRs.browse(sources)),\ "all sources should be merged" @@ -604,7 +532,7 @@ }, "check that new got all the updates (should be in the same state as c really)" def test_author_can_close_via_fwbot(env, config, make_repo): - project, prod, xxx = make_basic(env, config, make_repo, fp_token=True, fp_remote=True) + prod, _ = make_basic(env, config, make_repo) other_user = config['role_other'] other_token = other_user['token'] other = prod.fork(token=other_token) @@ -621,7 +549,7 @@ def test_author_can_close_via_fwbot(env, config, make_repo): pr.open(other_token) prod.post_status(c, 'success', 'legal/cla') prod.post_status(c, 'success', 'ci/runbot') - pr.post_comment('%s close' % project.fp_github_name, other_token) + pr.post_comment('hansen close', other_token) pr.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() assert pr.state == 'open' @@ -641,26 +569,26 @@ def test_author_can_close_via_fwbot(env, config, make_repo): pr1.close(other_token) # user can close via fwbot with prod: - pr1.post_comment('%s close' % project.fp_github_name, other_token) + pr1.post_comment('hansen close', other_token) env.run_crons() assert pr1.state == 'closed' assert pr1_id.state == 'closed' def test_skip_ci_all(env, config, make_repo): - project, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True) + prod, _
= make_basic(env, config, make_repo) with prod: prod.make_commits('a', Commit('x', tree={'x': '0'}), ref='heads/change') pr = prod.make_pr(target='a', head='change') prod.post_status(pr.head, 'success', 'legal/cla') prod.post_status(pr.head, 'success', 'ci/runbot') - pr.post_comment('%s skipci' % project.fp_github_name, config['role_reviewer']['token']) + pr.post_comment('hansen fw=skipci', config['role_reviewer']['token']) pr.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() assert env['runbot_merge.pull_requests'].search([ ('repository.name', '=', prod.name), ('number', '=', pr.number) - ]).fw_policy == 'skipci' + ]).batch_id.fw_policy == 'skipci' with prod: prod.post_status('staging.a', 'success', 'legal/cla') @@ -679,7 +607,7 @@ def test_skip_ci_all(env, config, make_repo): assert pr2_id.source_id == pr0_id def test_skip_ci_next(env, config, make_repo): - project, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True) + prod, _ = make_basic(env, config, make_repo) with prod: prod.make_commits('a', Commit('x', tree={'x': '0'}), ref='heads/change') @@ -697,10 +625,10 @@ def test_skip_ci_next(env, config, make_repo): pr0_id, pr1_id = env['runbot_merge.pull_requests'].search([], order='number') with prod: prod.get_pr(pr1_id.number).post_comment( - '%s skipci' % project.fp_github_name, - config['role_user']['token'] + 'hansen fw=skipci', + config['role_reviewer']['token'] ) - assert pr0_id.fw_policy == 'skipci' + assert pr0_id.batch_id.fw_policy == 'skipci' env.run_crons() _, _, pr2_id = env['runbot_merge.pull_requests'].search([], order='number') @@ -717,7 +645,8 @@ def test_retarget_after_freeze(env, config, make_repo, users): latter port. In that case the reinsertion task should just do nothing, and the retargeted PR should be forward-ported normally once merged. """ - project, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True) + prod, _ = make_basic(env, config, make_repo) + project = env['runbot_merge.project'].search([]) with prod: [c] = prod.make_commits('b', Commit('thing', tree={'x': '1'}), ref='heads/mypr') pr = prod.make_pr(target='b', head='mypr') @@ -748,7 +677,7 @@ def test_retarget_after_freeze(env, config, make_repo, users): project.write({ 'branch_ids': [ (1, branch_c.id, {'sequence': 1}), - (0, 0, {'name': 'bprime', 'sequence': 2, 'fp_target': True}), + (0, 0, {'name': 'bprime', 'sequence': 2}), (1, branch_b.id, {'sequence': 3}), (1, branch_a.id, {'sequence': 4}), ] @@ -766,7 +695,7 @@ def test_retarget_after_freeze(env, config, make_repo, users): port_pr.base = 'bprime' assert port_id.target == new_branch - env.run_crons('forwardport.port_forward') + env.run_crons(None) assert not job.exists(), "job should have succeeded and apoptosed" # since the PR was "already forward-ported" to the new branch it should not @@ -784,13 +713,16 @@ def test_retarget_after_freeze(env, config, make_repo, users): prod.post_status('staging.bprime', 'success', 'legal/cla') env.run_crons() + # #2 batch 6 (???) 
+ assert port_id.state == 'merged' + new_pr_id = env['runbot_merge.pull_requests'].search([('state', 'not in', ('merged', 'closed'))]) assert len(new_pr_id) == 1 assert new_pr_id.parent_id == port_id assert new_pr_id.target == branch_c def test_approve_draft(env, config, make_repo, users): - _, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True) + prod, _ = make_basic(env, config, make_repo) with prod: prod.make_commits('a', Commit('x', tree={'x': '0'}), ref='heads/change') @@ -803,7 +735,7 @@ def test_approve_draft(env, config, make_repo, users): assert pr.comments == [ (users['reviewer'], 'hansen r+'), seen(env, pr, users), - (users['user'], f"I'm sorry, @{users['reviewer']}: draft PRs can not be approved."), + (users['user'], f"@{users['reviewer']} draft PRs can not be approved."), ] with prod: @@ -818,8 +750,14 @@ def test_freeze(env, config, make_repo, users): """Freeze: - should not forward-port the freeze PRs themselves + - unmerged forward ports need to be backfilled + - if the tip of the forward port is approved, the backfilled forward port + should also be approved """ - project, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True) + prod, _ = make_basic(env, config, make_repo, statuses='default') + project = env['runbot_merge.project'].search([]) + + # branches here are "a" (older), "b", and "c" (master) with prod: [root, _] = prod.make_commits( @@ -829,6 +767,22 @@ def test_freeze(env, config, make_repo, users): ref='heads/b' ) prod.make_commits(root, Commit('other', tree={'f': '1'}), ref='heads/c') + + # region PR which is forward ported but the FPs are not merged (they are approved) + with prod: + prod.make_commits("a", Commit("stuff", tree={'x': '0'}), ref="heads/abranch") + p = prod.make_pr(target='a', head='abranch') + p.post_comment("hansen r+ fw=skipci", config['role_reviewer']['token']) + prod.post_status('abranch', 'success') + env.run_crons() + with prod: + prod.post_status('staging.a', 'success') + env.run_crons() + pr_a_id, pr_b_id, pr_c_id = pr_ids = env['runbot_merge.pull_requests'].search([], order='number') + assert len(pr_ids) == 3, \ + "should have created two forward ports, one in b and one in c (/ master)" + # endregion + with prod: prod.make_commits( 'c', @@ -838,6 +792,15 @@ def test_freeze(env, config, make_repo, users): release = prod.make_pr(target='c', head='release-1.1') env.run_crons() + # approve pr_c_id but don't actually merge it before freezing + with prod: + prod.post_status(pr_b_id.head, 'success') + prod.post_status(pr_c_id.head, 'success') + prod.get_pr(pr_c_id.number).post_comment('hansen r+', config['role_reviewer']['token']) + # review comment should be handled eagerly + assert pr_b_id.reviewed_by + assert pr_c_id.reviewed_by + w = project.action_prepare_freeze() assert w['res_model'] == 'runbot_merge.project.freeze' w_id = env[w['res_model']].browse([w['res_id']]) assert not w_id.errors w_id.action_freeze() + assert project.branch_ids.mapped('name') == ['c', 'post-b', 'b', 'a'] + # re-enable forward-port cron after freeze _, cron_id = env['ir.model.data'].check_object_reference('forwardport', 'port_forward', context={'active_test': False}) env['ir.cron'].browse([cron_id]).active = True - - # run crons to process the feedback, run a second time in case of e.g.
- # forward porting - env.run_crons() - env.run_crons() + env.run_crons('forwardport.port_forward') assert release_id.state == 'merged' assert not env['runbot_merge.pull_requests'].search([ - ('state', '!=', 'merged') + ('source_id', '=', release_id.id), ]), "the release PRs should not be forward-ported" + assert env['runbot_merge.stagings'].search_count([]) == 2,\ + "b and c forward ports should be staged since they were ready before freeze" + + # an intermediate PR should have been created + pr_inserted = env['runbot_merge.pull_requests'].search([ + ('source_id', '=', pr_a_id.id), + ('target.name', '=', 'post-b'), + ]) + assert pr_inserted, "an intermediate PR should have been reinserted in the sequence" + assert pr_c_id.parent_id == pr_inserted + assert pr_inserted.parent_id == pr_b_id + + assert pr_inserted.reviewed_by == pr_c_id.reviewed_by,\ + "review state should have been copied over from c (master)" + with prod: + prod.post_status(pr_inserted.head, 'success') + prod.post_status('staging.b', 'success') + prod.post_status('staging.c', 'success') + env.run_crons() + with prod: + prod.post_status('staging.post-b', 'success') + env.run_crons() + + assert env['runbot_merge.pull_requests'].search_count([('state', '=', 'merged')]) \ + == len(['release', 'initial', 'fw-b', 'fw-post-b', 'fw-c']) + + +@pytest.mark.expect_log_errors(reason="missing / invalid head causes an error to be logged") def test_missing_magic_ref(env, config, make_repo): """There are cases where github fails to create / publish or fails to update the magic refs in refs/pull/*. @@ -873,7 +862,7 @@ def test_missing_magic_ref(env, config, make_repo): Emulate this behaviour by updating the PR with a commit which lives in the repo but has no ref. """ - _, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True) + prod, _ = make_basic(env, config, make_repo) a_head = prod.commit('refs/heads/a') with prod: [c] = prod.make_commits(a_head.id, Commit('x', tree={'x': '0'}), ref='heads/change') @@ -903,7 +892,7 @@ def test_missing_magic_ref(env, config, make_repo): # check that the batch is still here and targeted for the future req = env['forwardport.batches'].search([]) assert len(req) == 1 - assert req.retry_after > datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + assert req.retry_after > datetime.utcnow().isoformat(" ", "seconds") # reset retry_after req.retry_after = '1900-01-01 01:01:01' @@ -912,7 +901,7 @@ def test_missing_magic_ref(env, config, make_repo): [c2] = prod.make_commits(a_head.id, Commit('y', tree={'x': '0'})) assert c2 != c pr_id.head = c2 - env.run_crons() + env.run_crons(None) fp_id = env['runbot_merge.pull_requests'].search([('source_id', '=', pr_id.id)]) assert fp_id @@ -920,3 +909,308 @@ # what they are (rather than e.g.
diffing the branch's HEAD with the target) # as a result it doesn't forwardport our fake, we'd have to reset the PR's # branch for that to happen + +def test_disable_branch_with_batches(env, config, make_repo, users): + """We want to avoid losing pull requests, so when deactivating a branch, + if there are *forward port* batches targeting that branch which have not + been forward ported yet, port them over, as if their source had been merged + after the branch was disabled (thus skipped over) + """ + repo, fork = make_basic(env, config, make_repo, statuses="default") + proj = env['runbot_merge.project'].search([]) + branch_b = env['runbot_merge.branch'].search([('name', '=', 'b')]) + assert branch_b + + # region repo2 creation & setup + repo2 = make_repo('proj2') + with repo2: + [a, b, c] = repo2.make_commits( + None, + Commit("a", tree={"f": "a"}), + Commit("b", tree={"g": "b"}), + Commit("c", tree={"h": "c"}), + ) + repo2.make_ref("heads/a", a) + repo2.make_ref("heads/b", b) + repo2.make_ref("heads/c", c) + fork2 = repo2.fork() + repo2_id = env['runbot_merge.repository'].create({ + "project_id": proj.id, + "name": repo2.name, + "required_statuses": "default", + "fp_remote_target": fork2.name, + }) + env['runbot_merge.events_sources'].create({'repository': repo2.name}) + env['res.partner'].search([ + ('github_login', '=', config['role_reviewer']['user']) + ]).write({ + 'review_rights': [(0, 0, {'repository_id': repo2_id.id, 'review': True})] + }) + env['res.partner'].search([ + ('github_login', '=', config['role_self_reviewer']['user']) + ]).write({ + 'review_rights': [(0, 0, {'repository_id': repo2_id.id, 'self_review': True})] + }) + # endregion + + # region set up forward ported batches + with repo, fork, repo2, fork2: + fork.make_commits("a", Commit("x", tree={"x": "1"}), ref="heads/x") + pr1_a = repo.make_pr(title="X", target="a", head=f"{fork.owner}:x") + pr1_a.post_comment("hansen r+", config['role_reviewer']['token']) + repo.post_status(pr1_a.head, "success") + + fork2.make_commits("a", Commit("x", tree={"x": "1"}), ref="heads/x") + pr2_a = repo2.make_pr(title="X", target="a", head=f"{fork2.owner}:x") + pr2_a.post_comment("hansen r+", config['role_reviewer']['token']) + repo2.post_status(pr2_a.head, "success") + + fork.make_commits("a", Commit("y", tree={"y": "1"}), ref="heads/y") + pr3_a = repo.make_pr(title="Y", target="a", head=f"{fork.owner}:y") + pr3_a.post_comment("hansen r+", config['role_reviewer']['token']) + repo.post_status(pr3_a.head, 'success') + # remove just pr2 from the forward ports (maybe?) + pr2_a_id = to_pr(env, pr2_a) + pr2_a_id.limit_id = branch_b.id + env.run_crons() + assert pr2_a_id.limit_id == branch_b + # endregion + + with repo, repo2: + repo.post_status('staging.a', 'success') + repo2.post_status('staging.a', 'success') + env.run_crons() + + PullRequests = env['runbot_merge.pull_requests'] + pr1_b_id = PullRequests.search([('parent_id', '=', to_pr(env, pr1_a).id)]) + pr2_b_id = PullRequests.search([('parent_id', '=', pr2_a_id.id)]) + pr3_b_id = PullRequests.search([('parent_id', '=', to_pr(env, pr3_a).id)]) + assert pr1_b_id.parent_id + assert pr1_b_id.state == 'opened' + assert pr2_b_id.parent_id + assert pr2_b_id.state == 'opened' + assert pr3_b_id.parent_id + assert pr3_b_id.state == 'opened' + # detach pr3 (?)
+ pr3_b_id.write({'parent_id': False, 'detach_reason': 'because'}) + + b_id = proj.branch_ids.filtered(lambda b: b.name == 'b') + proj.write({ + 'branch_ids': [(1, b_id.id, {'active': False})] + }) + env.run_crons() + assert not b_id.active + # pr1_a, pr1_b, pr1_c, pr2_a, pr2_b, pr3_a, pr3_b, pr3_c + assert PullRequests.search_count([]) == 8, "should have ported pr1 and pr3 but not pr2" + assert PullRequests.search_count([('parent_id', '=', pr1_b_id.id)]) + assert PullRequests.search_count([('parent_id', '=', pr3_b_id.id)]) + + assert repo.get_pr(pr1_b_id.number).comments == [ + seen(env, repo.get_pr(pr1_b_id.number), users), + (users['user'], "This PR targets b and is part of the forward-port chain. Further PRs will be created up to c.\n\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n"), + (users['user'], "@{user} @{reviewer} the target branch 'b' has been disabled, you may want to close this PR.\n\nAs this was not its limit, it will automatically be forward ported to the next active branch.".format_map(users)), + ] + assert repo2.get_pr(pr2_b_id.number).comments == [ + seen(env, repo2.get_pr(pr2_b_id.number), users), + (users['user'], """\ +@{user} @{reviewer} this PR targets b and is the last of the forward-port chain. + +To merge the full chain, use +> @hansen r+ + +More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port +""".format_map(users)), + (users['user'], "@{user} @{reviewer} the target branch 'b' has been disabled, you may want to close this PR.".format_map(users)), + ] + +def test_disable_multitudes(env, config, make_repo, users, setreviewers): + """Ensure that deactivation ports can jump over other deactivated branches. + """ + # region setup + repo = make_repo("bob") + project = env['runbot_merge.project'].create({ + "name": "bob", + "github_token": config['github']['token'], + "github_prefix": "hansen", + "fp_github_token": config['github']['token'], + "fp_github_name": "herbert", + "branch_ids": [ + (0, 0, {'name': 'a', 'sequence': 90}), + (0, 0, {'name': 'b', 'sequence': 80}), + (0, 0, {'name': 'c', 'sequence': 70}), + (0, 0, {'name': 'd', 'sequence': 60}), + ], + "repo_ids": [(0, 0, { + 'name': repo.name, + 'required_statuses': 'default', + 'fp_remote_target': repo.name, + })], + }) + setreviewers(project.repo_ids) + env['runbot_merge.events_sources'].create({'repository': repo.name}) + + with repo: + [a, b, c, d] = repo.make_commits( + None, + Commit("a", tree={"branch": "a"}), + Commit("b", tree={"branch": "b"}), + Commit("c", tree={"branch": "c"}), + Commit("d", tree={"branch": "d"}), + ) + repo.make_ref("heads/a", a) + repo.make_ref("heads/b", b) + repo.make_ref("heads/c", c) + repo.make_ref("heads/d", d) + # endregion + + with repo: + [a] = repo.make_commits("a", Commit("X", tree={"x": "1"}), ref="heads/x") + pra = repo.make_pr(target="a", head="x") + pra.post_comment("hansen r+", config['role_reviewer']['token']) + repo.post_status(a, "success") + env.run_crons() + + with repo: + repo.post_status('staging.a', 'success') + env.run_crons() + + pra_id = to_pr(env, pra) + assert pra_id.state == 'merged' + + prb_id = env['runbot_merge.pull_requests'].search([('target.name', '=', 'b')]) + assert prb_id.parent_id == pra_id + + project.write({ + 'branch_ids': [ + (1, b.id, {'active': False}) + for b in env['runbot_merge.branch'].search([('name', 'in', ['b', 'c'])]) + ] + }) + env.run_crons() + + # should not have ported prb to the disabled branch c + assert not env['runbot_merge.pull_requests'].search([('target.name', '=', 'c')]) + + # 
should have ported prb to the active branch d + prd_id = env['runbot_merge.pull_requests'].search([('target.name', '=', 'd')]) + assert prd_id + assert prd_id.parent_id == prb_id + + prb = repo.get_pr(prb_id.number) + assert prb.comments == [ + seen(env, prb, users), + (users['user'], 'This PR targets b and is part of the forward-port chain. Further PRs will be created up to d.\n\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n'), + (users['user'], """\ +@{user} @{reviewer} the target branch 'b' has been disabled, you may want to close this PR. + +As this was not its limit, it will automatically be forward ported to the next active branch.\ +""".format_map(users)), + ] + prd = repo.get_pr(prd_id.number) + assert prd.comments == [ + seen(env, prd, users), + (users['user'], """\ +@{user} @{reviewer} this PR targets d and is the last of the forward-port chain. + +To merge the full chain, use +> @hansen r+ + +More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port +""".format_map(users)) + ] + +FMT = '%Y-%m-%d %H:%M:%S' +FAKE_PREV_WEEK = (datetime.now() + timedelta(days=1)).strftime(FMT) +def test_reminder_detached(env, config, make_repo, users): + """On detached forward ports, both sides of the detachment should be notified. + """ + # region setup + prod, _ = make_basic(env, config, make_repo, statuses='default') + with prod: + prod.make_commits('a', Commit('c', tree={'x': '0'}), ref="heads/abranch") + pr_a = prod.make_pr(target='a', head='abranch') + prod.post_status('abranch', 'success') + pr_a.post_comment('hansen r+ fw=skipci', config['role_reviewer']['token']) + env.run_crons() + + with prod: + prod.post_status('staging.a', 'success') + env.run_crons() + + pr_a_id = to_pr(env, pr_a) + pr_b_id = env['runbot_merge.pull_requests'].search([ + ('target.name', '=', 'b'), + ('parent_id', '=', pr_a_id.id), + ]) + assert pr_b_id + with prod: + prod.post_status(pr_b_id.head, 'success') + env.run_crons() + pr_c_id = env['runbot_merge.pull_requests'].search([ + ('target.name', '=', 'c'), + ('parent_id', '=', pr_b_id.id), + ]) + assert pr_c_id + # endregion + + pr_b = prod.get_pr(pr_b_id.number) + pr_c = prod.get_pr(pr_c_id.number) + + # region sanity check + env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK}) + + assert pr_b.comments == [ + seen(env, pr_b, users), + (users['user'], """\ +This PR targets b and is part of the forward-port chain. Further PRs will be created up to c. + +More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port +""")], "the intermediate PR should not be reminded" + + assert pr_c.comments == [ + seen(env, pr_c, users), + (users['user'], """\ +@%s @%s this PR targets c and is the last of the forward-port chain containing: +* %s + +To merge the full chain, use +> @hansen r+ + +More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port +""" % ( + users['user'], users['reviewer'], + pr_b_id.display_name, + )), + (users['user'], "@%s @%s this forward port of %s is awaiting action (not merged or closed)." % ( + users['user'], + users['reviewer'], + pr_a_id.display_name, + )) + ], "the final PR should be reminded" + # endregion + + # region check detached + pr_c_id.write({'parent_id': False, 'detach_reason': 'because'}) + env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK}) + + assert pr_b.comments[2:] == [ + (users['user'], "@%s @%s child PR %s was modified / updated and has become a normal PR. 
This PR (and any of its parents) will need to be merged independently as approvals won't cross." % ( + users['user'], + users['reviewer'], + pr_c_id.display_name, + )), + (users['user'], "@%s @%s this forward port of %s is awaiting action (not merged or closed)." % ( + users['user'], + users['reviewer'], + pr_a_id.display_name, + )) + ], "the detached-from intermediate PR should now be reminded" + assert pr_c.comments[3:] == [ + (users['user'], "@%(user)s @%(reviewer)s this PR was modified / updated and has become a normal PR. It must be merged directly." % users), + (users['user'], "@%s @%s this forward port of %s is awaiting action (not merged or closed)." % ( + users['user'], + users['reviewer'], + pr_a_id.display_name, + )) + ], "the final forward port should be reminded as before" + # endregion diff --git a/mergebot_test_utils/utils.py b/mergebot_test_utils/utils.py index 2df8995d..c5473e93 100644 --- a/mergebot_test_utils/utils.py +++ b/mergebot_test_utils/utils.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- import itertools import re +import time from lxml import html @@ -41,29 +42,76 @@ def _simple_init(repo): prx = repo.make_pr(title='title', body='body', target='master', head=c2) return prx -class re_matches: +class matches(str): + # necessary so str.__new__ does not freak out on `flags` + def __new__(cls, pattern, flags=0): + return super().__new__(cls, pattern) + def __init__(self, pattern, flags=0): - self._r = re.compile(pattern, flags) + p, n = re.subn( + # `re.escape` will escape the `$`, so we need to handle that... + # maybe it should not be $? + r'\\\$(\w*?)\\\$', + lambda m: f'(?P<{m[1]}>.*?)' if m[1] else '(.*?)', + re.escape(self), + ) + assert n, f"matches' pattern should have at least one placeholder, found none in\n{pattern}" + self._r = re.compile(p, flags | re.DOTALL) def __eq__(self, text): - return self._r.match(text) - - def __repr__(self): - return self._r.pattern + '...' + if not isinstance(text, str): + return NotImplemented + return self._r.search(text) def seen(env, pr, users): - return users['user'], f'[Pull request status dashboard]({to_pr(env, pr).url}).' + url = to_pr(env, pr).url + return users['user'], f'[![Pull request status dashboard]({url}.png)]({url})' -def make_basic(env, config, make_repo, *, reponame='proj', project_name='myproject'): - """ Creates a basic repo with 3 forking branches +def make_basic( + env, + config, + make_repo, + *, + project_name='myproject', + reponame='proj', + statuses='legal/cla,ci/runbot', + fp_token=True, + fp_remote=True, +): + """ Creates a project ``project_name`` **if none exists**, otherwise + retrieves the existing one and adds a new repository and its fork. + + Repositories are setup with three forking branches: + + :: + + f = 0 -- 1 -- 2 -- 3 -- 4 : a + | + g = `-- 11 -- 22 : b + | + h = `-- 111 : c - f = 0 -- 1 -- 2 -- 3 -- 4 : a - | - g = `-- 11 -- 22 : b - | - h = `-- 111 : c each branch just adds and modifies a file (resp. 
f, g and h) through the contents sequence a b c d e + + :param env: Environment, for odoo model interactions + :param config: pytest project config thingie + :param make_repo: repo maker function, normally the fixture, should be a + ``Callable[[str], Repo]`` + :param project_name: internal project name, can be used to recover the + project object afterward, matches exactly since it's + unique per odoo db (and thus test) + :param reponame: the base name of the repository, for identification, for + concurrency reasons the actual repository name *will* be + different + :param statuses: required statuses for the repository, stupidly default to + the old Odoo statuses, should be moved to ``default`` over + time for simplicity (unless the test specifically calls for + multiple statuses) + :param fp_token: whether to set the ``fp_github_token`` on the project if + / when creating it + :param fp_remote: whether to create a fork repo and set it as the + repository's ``fp_remote_target`` """ Projects = env['runbot_merge.project'] project = Projects.search([('name', '=', project_name)]) @@ -72,15 +120,17 @@ def make_basic(env, config, make_repo, *, reponame='proj', project_name='myproje 'name': project_name, 'github_token': config['github']['token'], 'github_prefix': 'hansen', - 'fp_github_token': config['github']['token'], + 'fp_github_token': fp_token and config['github']['token'], + 'fp_github_name': 'herbert', 'branch_ids': [ - (0, 0, {'name': 'a', 'sequence': 100, 'fp_target': True}), - (0, 0, {'name': 'b', 'sequence': 80, 'fp_target': True}), - (0, 0, {'name': 'c', 'sequence': 60, 'fp_target': True}), + (0, 0, {'name': 'a', 'sequence': 100}), + (0, 0, {'name': 'b', 'sequence': 80}), + (0, 0, {'name': 'c', 'sequence': 60}), ], }) prod = make_repo(reponame) + env['runbot_merge.events_sources'].create({'repository': prod.name}) with prod: a_0, a_1, a_2, a_3, a_4, = prod.make_commits( None, @@ -102,12 +152,13 @@ def make_basic(env, config, make_repo, *, reponame='proj', project_name='myproje Commit('111', tree={'h': 'a'}), ref='heads/c', ) - other = prod.fork() + other = prod.fork() if fp_remote else None repo = env['runbot_merge.repository'].create({ 'project_id': project.id, 'name': prod.name, - 'required_statuses': 'legal/cla,ci/runbot', - 'fp_remote_target': other.name, + 'required_statuses': statuses, + 'fp_remote_target': other.name if other else False, + 'group_id': False, }) env['res.partner'].search([ ('github_login', '=', config['role_reviewer']['user']) @@ -126,14 +177,26 @@ def pr_page(page, pr): return html.fromstring(page(f'/{pr.repo.name}/pull/{pr.number}')) def to_pr(env, pr): - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', pr.repo.name), - ('number', '=', pr.number), - ]) - assert len(pr) == 1, f"Expected to find {pr.repo.name}#{pr.number}, got {pr}." - return pr + for _ in range(5): + pr_id = env['runbot_merge.pull_requests'].search([ + ('repository.name', '=', pr.repo.name), + ('number', '=', pr.number), + ]) + if pr_id: + assert len(pr_id) == 1, f"Expected to find {pr.repo.name}#{pr.number}, got {pr_id}." + return pr_id + time.sleep(1) + + raise TimeoutError(f"Unable to find {pr.repo.name}#{pr.number}") def part_of(label, pr_id, *, separator='\n\n'): """ Adds the "part-of" pseudo-header in the footer. 
""" - return f'{label}{separator}Part-of: {pr_id.display_name}' + return f"""\ +{label}{separator}\ +Part-of: {pr_id.display_name} +Signed-off-by: {pr_id.reviewed_by.formatted_email}""" + +def ensure_one(records): + assert len(records) == 1 + return records diff --git a/runbot_merge/__init__.py b/runbot_merge/__init__.py index f4821304..8c566c2a 100644 --- a/runbot_merge/__init__.py +++ b/runbot_merge/__init__.py @@ -1,46 +1,10 @@ -import logging -from os import environ - -import sentry_sdk -from sentry_sdk.integrations.logging import LoggingIntegration -from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware - -from odoo import http from . import models, controllers +from .sentry import enable_sentry -def delegate(self, attr): - return getattr(self.app, attr) -SentryWsgiMiddleware.__getattr__ = delegate - -def enable_sentry(): - logger = logging.getLogger('runbot_merge') - - dsn = environ.get('SENTRY_DSN') - if not dsn: - logger.info("No DSN found, skipping sentry...") - return - - try: - sentry_sdk.init( - dsn, - integrations=[ - # note: if the colorformatter is enabled, sentry gets lost - # and classifies everything as errors because it fails to - # properly classify levels as the colorformatter injects - # the ANSI color codes right into LogRecord.levelname - LoggingIntegration(level=logging.INFO, event_level=logging.WARNING), - ] - ) - http.root = SentryWsgiMiddleware(http.root) - except Exception: - logger.exception("DSN found, failed to enable sentry...") - else: - logger.info("DSN found, sentry enabled...") - -def _check_citext(cr): - cr.execute("select 1 from pg_extension where extname = 'citext'") - if not cr.rowcount: +def _check_citext(env): + env.cr.execute("select 1 from pg_extension where extname = 'citext'") + if not env.cr.rowcount: try: - cr.execute('create extension citext') + env.cr.execute('create extension citext') except Exception: raise AssertionError("runbot_merge needs the citext extension") diff --git a/runbot_merge/__manifest__.py b/runbot_merge/__manifest__.py index 82978d76..a21ce712 100644 --- a/runbot_merge/__manifest__.py +++ b/runbot_merge/__manifest__.py @@ -1,14 +1,18 @@ { 'name': 'merge bot', - 'version': '1.7', - 'depends': ['contacts', 'website'], + 'version': '1.15', + 'depends': ['contacts', 'mail', 'website'], 'data': [ 'security/security.xml', 'security/ir.model.access.csv', 'data/merge_cron.xml', + 'models/crons/git_maintenance.xml', + 'models/crons/cleanup_scratch_branches.xml', + 'data/runbot_merge.pull_requests.feedback.template.csv', 'views/res_partner.xml', 'views/runbot_merge_project.xml', + 'views/batch.xml', 'views/mergebot.xml', 'views/queues.xml', 'views/configuration.xml', diff --git a/runbot_merge/changelog/2023-08/opts.md b/runbot_merge/changelog/2023-08/opts.md new file mode 100644 index 00000000..84cc99b0 --- /dev/null +++ b/runbot_merge/changelog/2023-08/opts.md @@ -0,0 +1,6 @@ +IMP: optimize home page + +An unnecessary deopt and a few opportunities were found and fixed in the home +page / main dashboard, a few improvements have been implemented which should +significantly lower the number of SQL queries and the time needed to generate +the page. 
diff --git a/runbot_merge/changelog/2023-08/staging-reverse-index.md b/runbot_merge/changelog/2023-08/staging-reverse-index.md new file mode 100644 index 00000000..e276ed40 --- /dev/null +++ b/runbot_merge/changelog/2023-08/staging-reverse-index.md @@ -0,0 +1,6 @@ +ADD: stagings reverse index (from commits) + +Finding out the commits from a staging is not great but it's easy enough; the +reverse was difficult and very inefficient. Splat out the "heads" JSON field +into two join tables, and provide both ORM methods and a JSON endpoint to +look up stagings based on their commits. diff --git a/runbot_merge/changelog/2023-08/stagings-to-prs.md b/runbot_merge/changelog/2023-08/stagings-to-prs.md new file mode 100644 index 00000000..0deb8aa8 --- /dev/null +++ b/runbot_merge/changelog/2023-08/stagings-to-prs.md @@ -0,0 +1,5 @@ +IMP: added quick jump from staging to PR in the backend + +In the backend, going through the batches to reach a PR is really not +convenient; directly displaying both the github URL and frontend URL for each PR +makes jumping around much easier. diff --git a/runbot_merge/changelog/2023-10/free-the-limit.md b/runbot_merge/changelog/2023-10/free-the-limit.md new file mode 100644 index 00000000..4cdee310 --- /dev/null +++ b/runbot_merge/changelog/2023-10/free-the-limit.md @@ -0,0 +1,8 @@ +IMP: allow setting forward-port limits after the source pull request has been merged + +Should now be possible to both extend and retract the forward port limit +afterwards, though obviously no shorter than the current tip of the forward +port sequence. One limitation is that forward ports being created can't be +stopped, so there might be some windows where trying to set the limit to the +current tip will fail (because it's in the process of being forward-ported to +the next branch). diff --git a/runbot_merge/changelog/2023-12/commands.md b/runbot_merge/changelog/2023-12/commands.md new file mode 100644 index 00000000..1d054179 --- /dev/null +++ b/runbot_merge/changelog/2023-12/commands.md @@ -0,0 +1,57 @@ +CHG: complete rework of the commands system + +# fun is dead: strict commands parsing + +Historically the bots would apply whatever looked like a command and ignore the +rest. This led to people sending novels to the bot, then being surprised the bot +found a command in the mess. + +The bots now ignore all lines which contain any non-command. Example: + +> @robodoo r+ when green darling + +Previously, the bot would apply the `r+` and ignore the rest. Now the bot will +ignore everything and reply with + +> unknown command "when" + +# fwbot is dead + +The mergebot (@robodoo) is now responsible for the old fwbot commands: + +- close, ignore, up to, ... work as they ever did, just with robodoo +- `robodoo r+` now approves the parents if the current PR is a forward port + - a specific PR can be approved even in forward ports by providing its number + e.g. `robodoo r=45328` will approve just PR 45328, if that is the PR the + comment is being posted on or one of its parents + - the approval of forward ports won't skip over un-approvable PRs anymore + - the rights of the original author have been restricted slightly: they can + only approve the direct descendants of merged PRs, so if one of the parents + has been modified and is not merged yet, the original author can't approve, + nor can they approve the modified PR, or a conflicting PR which has to get + fixed (?)
+ +# no more p=<number> + +The old priorities command was a tangle of multiple concerns, not all of which +were always desired or applicable. These tangles have been split along their +various axes. + +# listing + +The new commands are: + +- `default`, sets the staging priority back to the default +- `priority`, sets the staging priority to elevated, on staging these PRs are + staged first, then the `normal` PRs are added +- `alone`, sets the staging priority to high, these PRs are staged before + considering splits, and only `alone` PRs are staged together even if the batch + is not full +- `fw=default`, processes forward ports normally +- `fw=skipci`, once the current PR has been merged creates all the forward ports + without waiting for each to have valid statuses +- `fw=skipmerge`, immediately create all forward ports even if the base pull + request has not even been merged yet +- `skipchecks`, makes the entire batch (target PR and any linked PR) immediately + ready, bypassing statuses and reviews +- `cancel`, cancels the staging on the target branch, if any diff --git a/runbot_merge/changelog/2023-12/staging-priority.md b/runbot_merge/changelog/2023-12/staging-priority.md new file mode 100644 index 00000000..35bcfec8 --- /dev/null +++ b/runbot_merge/changelog/2023-12/staging-priority.md @@ -0,0 +1,4 @@ +ADD: projects now know how to prioritise new PRs over splits + +While this likely has relatively low utility, we'll look at how it performs +during periods of high throughput. diff --git a/runbot_merge/changelog/2023-12/staging-shutdown.md b/runbot_merge/changelog/2023-12/staging-shutdown.md new file mode 100644 index 00000000..1b22a65c --- /dev/null +++ b/runbot_merge/changelog/2023-12/staging-shutdown.md @@ -0,0 +1,14 @@ +ADD: stagings can now be disabled on a per-project basis + +Currently stopping stagings requires stopping the staging cron(s), which causes +several issues: + +- the staging cron runs very often, so it can be difficult to find a window to + deactivate it (as the cron runner acquires an exclusive lock on the cron) +- the staging cron is global, so it does not disable staging only on the + problematic project (to say nothing of branch) but on all of them + +The latter is not currently a huge issue as only one of the mergebot-tracked +projects is ultra active (spreadsheet activity is on the order of a few +single-PR stagings a day), but the former is really annoying when trying to +stop runaway broken stagings. diff --git a/runbot_merge/changelog/2024-08/description.md b/runbot_merge/changelog/2024-08/description.md new file mode 100644 index 00000000..90fdf25b --- /dev/null +++ b/runbot_merge/changelog/2024-08/description.md @@ -0,0 +1,10 @@ +IMP: PR descriptions are now markdown-rendered in the dashboard + +Previously the raw text was displayed. The main advantage of rendering, aside +from not splatting huge links in the middle of the thing, is that we can +autolink *odoo tasks* if they're of a pattern we recognize. Some support has +also been added for github's references to mirror GFM rendering. + +This would be a lot less useful (and in fact pretty much useless) if we could +use github's built-in [references to external resources](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/managing-repository-settings/configuring-autolinks-to-reference-external-resources); +sadly that seems not to be available on our plan.
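The `staging-reverse-index` changelog entry above promises a JSON endpoint for looking stagings up from commits; the controller changes just below add it as `/runbot_merge/stagings`. A hedged usage sketch follows, under the assumption that Odoo `type='json'` routes expect a JSON-RPC 2.0 envelope with keyword arguments under `params`; the host and commit hash are placeholders.

```python
# Look up the stagings containing a given commit via the JSON endpoint
# added below. Host and hash are invented for illustration.
import requests

resp = requests.post(
    'https://mergebot.example.com/runbot_merge/stagings',
    json={
        'jsonrpc': '2.0',
        'method': 'call',
        'id': 1,
        # 'commits' matches commits merged by a staging,
        # 'heads' matches the staging heads themselves
        'params': {'commits': ['0123456789abcdef0123456789abcdef01234567']},
    },
)
resp.raise_for_status()
staging_ids = resp.json()['result']  # ids of matching runbot_merge.stagings
```

The returned ids can then be fed to the sibling `/runbot_merge/stagings/<id>` route below to list the PRs of each staging's batches.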
diff --git a/runbot_merge/controllers/__init__.py b/runbot_merge/controllers/__init__.py index 87b84606..268a05e4 100644 --- a/runbot_merge/controllers/__init__.py +++ b/runbot_merge/controllers/__init__.py @@ -3,6 +3,7 @@ import hmac import logging import json +import sentry_sdk import werkzeug.exceptions from odoo.http import Controller, request, route @@ -14,44 +15,126 @@ from .. import utils, github _logger = logging.getLogger(__name__) class MergebotController(Controller): + @route('/runbot_merge/stagings', auth='none', type='json') + def stagings_for_commits(self, commits=None, heads=None): + Stagings = request.env(user=1)['runbot_merge.stagings'].sudo() + if commits: + stagings = Stagings.for_commits(*commits) + elif heads: + stagings = Stagings.for_heads(*heads) + else: + raise ValueError('Must receive one of "commits" or "heads" kwarg') + + return stagings.ids + + @route('/runbot_merge/stagings/<int:staging>', auth='none', type='json') + def prs_for_staging(self, staging): + staging = request.env(user=1)['runbot_merge.stagings'].browse(staging) + return [ + batch.prs.mapped(lambda p: { + 'name': p.display_name, + 'repository': p.repository.name, + 'number': p.number, + }) + for batch in staging.sudo().batch_ids + ] + + @route('/runbot_merge/stagings/<int:from_staging>/<int:to_staging>', auth='none', type='json') + def prs_for_stagings(self, from_staging, to_staging, include_from=True, include_to=True): + Stagings = request.env(user=1, context={"active_test": False})['runbot_merge.stagings'] + from_staging = Stagings.browse(from_staging) + to_staging = Stagings.browse(to_staging) + if from_staging.target != to_staging.target: + raise ValueError(f"Stagings must have the same target branch, found {from_staging.target.name} and {to_staging.target.name}") + if from_staging.id >= to_staging.id: + raise ValueError("first staging must be older than second staging") + + stagings = Stagings.search([ + ('target', '=', to_staging.target.id), + ('state', '=', 'success'), + ('id', '>=' if include_from else '>', from_staging.id), + ('id', '<=' if include_to else '<', to_staging.id), + ], order="id asc") + + return [ + { + 'staging': staging.id, + 'prs': [ + batch.prs.mapped(lambda p: { + 'name': p.display_name, + 'repository': p.repository.name, + 'number': p.number, + }) + for batch in staging.batch_ids + ] + } + for staging in stagings + ] + + @route('/runbot_merge/hooks', auth='none', type='json', csrf=False, methods=['POST']) def index(self): req = request.httprequest event = req.headers['X-Github-Event'] + with sentry_sdk.configure_scope() as scope: + if scope.transaction: + # only in 1.8.0 (or at least 1.7.2) + if hasattr(scope, 'set_transaction_name'): + scope.set_transaction_name(f"webhook {event}") + else: # but our servers use 1.4.3 + scope.transaction = f"webhook {event}" github._gh.info(self._format(req)) + data = request.get_json_data() + repo = data.get('repository', {}).get('full_name') + env = request.env(user=1) + + source = repo and env['runbot_merge.events_sources'].search([('repository', '=', repo)]) + if not source: + _logger.warning( + "Ignored hook %s to unknown source repository %s", + req.headers.get("X-Github-Delivery"), + repo, + ) + return werkzeug.exceptions.Forbidden() + elif secret := source.secret: + signature = 'sha256=' + hmac.new(secret.strip().encode(), req.get_data(), hashlib.sha256).hexdigest() + if not hmac.compare_digest(signature, req.headers.get('X-Hub-Signature-256', '')): + _logger.warning( + "Ignored hook %s with incorrect signature on %s: got %s
expected %s, in:\n%s", + req.headers.get('X-Github-Delivery'), + repo, + req.headers.get('X-Hub-Signature-256'), + signature, + req.headers, + ) + return werkzeug.exceptions.Forbidden() + elif req.headers.get('X-Hub-Signature-256'): + _logger.info("No secret for %s but received a signature in:\n%s", repo, req.headers) + else: + _logger.info("No secret or signature for %s", repo) + c = EVENTS.get(event) if not c: _logger.warning('Unknown event %s', event) return 'Unknown event {}'.format(event) - repo = request.jsonrequest['repository']['full_name'] - env = request.env(user=1) - - secret = env['runbot_merge.repository'].search([ - ('name', '=', repo), - ]).project_id.secret - if secret: - signature = 'sha1=' + hmac.new(secret.encode('ascii'), req.get_data(), hashlib.sha1).hexdigest() - if not hmac.compare_digest(signature, req.headers.get('X-Hub-Signature', '')): - _logger.warning("Ignored hook with incorrect signature %s", - req.headers.get('X-Hub-Signature')) - return werkzeug.exceptions.Forbidden() - - return c(env, request.jsonrequest) + sentry_sdk.set_context('webhook', data) + return c(env, data) def _format(self, request): - return """<= {r.method} {r.full_path} + return """{r.method} {r.full_path} {headers} + {body} -vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv +vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\ """.format( r=request, headers='\n'.join( '\t%s: %s' % entry for entry in request.headers.items() ), - body=utils.shorten(request.get_data(as_text=True).strip(), 400) + body=request.get_data(as_text=True), ) def handle_pr(env, event): @@ -99,7 +182,7 @@ def handle_pr(env, event): return env['runbot_merge.pull_requests'].search([ ('repository', '=', repo.id), ('number', '=', pr['number']), - ('target', '=', target.id), + # ('target', '=', target.id), ]) # edition difficulty: pr['base']['ref'] is the *new* target, the old one # is at event['change']['base']['ref'] (if the target changed), so edition @@ -143,18 +226,26 @@ message = None if not branch: - message = f"This PR targets the un-managed branch {r}:{b}, it needs to be retargeted before it can be merged." + message = env.ref('runbot_merge.handle.branch.unmanaged')._format( + repository=r, + branch=b, + event=event, + ) _logger.info("Ignoring event %s on PR %s#%d for un-managed branch %s", event['action'], r, pr['number'], b) elif not branch.active: - message = f"This PR targets the disabled branch {r}:{b}, it needs to be retargeted before it can be merged."
+ message = env.ref('runbot_merge.handle.branch.inactive')._format( + repository=r, + branch=b, + event=event, + ) if message and event['action'] not in ('synchronize', 'closed'): feedback(message=message) if not branch: return "Not set up to care about {}:{}".format(r, b) - headers = request.httprequest.headers if request.httprequest else {} + headers = request.httprequest.headers if request else {} _logger.info( "%s: %s#%s (%s) (by %s, delivery %s by %s)", event['action'], @@ -164,6 +255,11 @@ def handle_pr(env, event): headers.get('X-Github-Delivery'), headers.get('User-Agent'), ) + sender = env['res.partner'].search([('github_login', '=', event['sender']['login'])], limit=1) + if not sender: + sender = env['res.partner'].create({'name': event['sender']['login'], 'github_login': event['sender']['login']}) + env['runbot_merge.pull_requests']._track_set_author(sender, fallback=True) + if event['action'] == 'opened': author_name = pr['user']['login'] author = env['res.partner'].search([('github_login', '=', author_name)], limit=1) @@ -172,7 +268,7 @@ def handle_pr(env, event): pr_obj = env['runbot_merge.pull_requests']._from_gh(pr) return "Tracking PR as {}".format(pr_obj.id) - pr_obj = env['runbot_merge.pull_requests']._get_or_schedule(r, pr['number']) + pr_obj = env['runbot_merge.pull_requests']._get_or_schedule(r, pr['number'], closing=event['action'] == 'closed') if not pr_obj: _logger.info("webhook %s on unknown PR %s#%s, scheduled fetch", event['action'], repo.name, pr['number']) return "Unknown PR {}:{}, scheduling fetch".format(repo.name, pr['number']) @@ -203,7 +299,8 @@ def handle_pr(env, event): ) pr_obj.write({ - 'state': 'opened', + 'reviewed_by': False, + 'error': False, 'head': pr['head']['sha'], 'squash': pr['commits'] == 1, }) @@ -227,26 +324,25 @@ def handle_pr(env, event): oldstate, ) return 'Closed {}'.format(pr_obj.display_name) - else: - _logger.warning( - '%s tried to close %s (state=%s)', - event['sender']['login'], - pr_obj.display_name, - oldstate, - ) - return 'Ignored: could not lock rows (probably being merged)' + + _logger.info( + '%s tried to close %s (state=%s) but locking failed', + event['sender']['login'], + pr_obj.display_name, + oldstate, + ) + return 'Ignored: could not lock rows (probably being merged)' if event['action'] == 'reopened' : if pr_obj.state == 'merged': feedback( close=True, - message="@%s ya silly goose you can't reopen a merged PR." 
% event['sender']['login'] + message=env.ref('runbot_merge.handle.pr.merged')._format(event=event), ) - - if pr_obj.state == 'closed': + elif pr_obj.closed: _logger.info('%s reopening %s', event['sender']['login'], pr_obj.display_name) pr_obj.write({ - 'state': 'opened', + 'closed': False, # updating the head triggers a revalidation 'head': pr['head']['sha'], 'squash': pr['commits'] == 1, @@ -279,6 +375,7 @@ def handle_status(env, event): statuses = c.statuses::jsonb || EXCLUDED.statuses::jsonb WHERE NOT c.statuses::jsonb @> EXCLUDED.statuses::jsonb """, [event['sha'], status_value]) + env.ref("runbot_merge.process_updated_commits")._trigger() return 'ok' @@ -290,6 +387,10 @@ def handle_comment(env, event): issue = event['issue']['number'] author = event['comment']['user']['login'] comment = event['comment']['body'] + if len(comment) > 5000: + _logger.warning('comment(%s): %s %s#%s => ignored (%d characters)', event['comment']['html_url'], author, repo, issue, len(comment)) + return "ignored: too big" + _logger.info('comment[%s]: %s %s#%s %r', event['action'], author, repo, issue, comment) if event['action'] != 'created': return "Ignored: action (%r) is not 'created'" % event['action'] @@ -301,6 +402,9 @@ def handle_review(env, event): pr = event['pull_request']['number'] author = event['review']['user']['login'] comment = event['review']['body'] or '' + if len(comment) > 5000: + _logger.warning('comment(%s): %s %s#%s => ignored (%d characters)', event['review']['html_url'], author, repo, pr, len(comment)) + return "ignored: too big" _logger.info('review[%s]: %s %s#%s %r', event['action'], author, repo, pr, comment) if event['action'] != 'submitted': @@ -311,7 +415,7 @@ def handle_review(env, event): target=event['pull_request']['base']['ref']) def handle_ping(env, event): - print("Got ping! {}".format(event['zen'])) + _logger.info("Got ping! 
%s", event['zen']) return "pong" EVENTS = { diff --git a/runbot_merge/controllers/dashboard.py b/runbot_merge/controllers/dashboard.py index e80a2f15..2d58e71e 100644 --- a/runbot_merge/controllers/dashboard.py +++ b/runbot_merge/controllers/dashboard.py @@ -1,36 +1,80 @@ # -*- coding: utf-8 -*- +from __future__ import annotations + +import base64 import collections +import colorsys +import hashlib +import io import json +import logging import pathlib +from dataclasses import dataclass +from email.utils import formatdate +from enum import Flag, auto +from functools import cached_property +from itertools import chain, product +from math import ceil +from typing import Tuple, cast, Mapping, Optional, List import markdown import markupsafe import werkzeug.exceptions +import werkzeug.wrappers +from PIL import Image, ImageDraw, ImageFont from odoo.http import Controller, route, request +from odoo.tools import file_open + +HORIZONTAL_PADDING = 20 +VERTICAL_PADDING = 5 + +_logger = logging.getLogger(__name__) LIMIT = 20 class MergebotDashboard(Controller): - @route('/runbot_merge', auth="public", type="http", website=True) + @route('/runbot_merge', auth="public", type="http", website=True, sitemap=True) def dashboard(self): + projects = request.env['runbot_merge.project'].with_context(active_test=False).sudo().search([]) + stagings = { + branch: projects.env['runbot_merge.stagings'].search([ + ('target', '=', branch.id)], order='staged_at desc', limit=6) + for project in projects + for branch in project.branch_ids + if branch.active + } + prefetch_set = list({ + id + for stagings in stagings.values() + for id in stagings.ids + }) + for st in stagings.values(): + st._prefetch_ids = prefetch_set + return request.render('runbot_merge.dashboard', { - 'projects': request.env['runbot_merge.project'].with_context(active_test=False).sudo().search([]), + 'projects': projects, + 'stagings_map': stagings, }) - @route('/runbot_merge/<int:branch_id>', auth='public', type='http', website=True) - def stagings(self, branch_id, until=None): + @route('/runbot_merge/<int:branch_id>', auth='public', type='http', website=True, sitemap=False) + def stagings(self, branch_id, until=None, state=''): branch = request.env['runbot_merge.branch'].browse(branch_id).sudo().exists() if not branch: raise werkzeug.exceptions.NotFound() - stagings = request.env['runbot_merge.stagings'].with_context(active_test=False).sudo().search([ - ('target', '=', branch.id), - ('staged_at', '<=', until) if until else (True, '=', True), - ], order='staged_at desc', limit=LIMIT+1) + staging_domain = [('target', '=', branch.id)] + if until: + staging_domain.append(('staged_at', '<=', until)) + if state: + staging_domain.append(('state', '=', state)) + + stagings = request.env['runbot_merge.stagings'].with_context(active_test=False).sudo().search(staging_domain, order='staged_at desc', limit=LIMIT + 1) return request.render('runbot_merge.branch_stagings', { 'branch': branch, 'stagings': stagings[:LIMIT], + 'until': until, + 'state': state, 'next': stagings[-1].staged_at if len(stagings) > LIMIT else None, }) @@ -49,7 +93,7 @@ class MergebotDashboard(Controller): entries.setdefault(key, []).extend(map(item_converter, items)) return entries - @route('/runbot_merge/changelog', auth='public', type='http', website=True) + @route('/runbot_merge/changelog', auth='public', type='http', website=True, sitemap=True) def changelog(self): md = markdown.Markdown(extensions=['nl2br'], output_format='html5') entries = self.entries(lambda t: 
markupsafe.Markup(md.convert(t))) @@ -57,8 +101,8 @@ class MergebotDashboard(Controller): 'entries': entries, }) - @route('/<org>/<repo>/pull/<int(min=1):pr>', auth='public', type='http', website=True) - def pr(self, org, repo, pr): + @route('/<org>/<repo>/pull/<int(min=1):pr><any("", ".png"):png>', auth='public', type='http', website=True, sitemap=False) + def pr(self, org, repo, pr, png): pr_id = request.env['runbot_merge.pull_requests'].sudo().search([ ('repository.name', '=', f'{org}/{repo}'), ('number', '=', int(pr)), @@ -66,8 +110,17 @@ class MergebotDashboard(Controller): if not pr_id: raise werkzeug.exceptions.NotFound() if not pr_id.repository.group_id <= request.env.user.groups_id: + _logger.warning( + "Access error: %s (%s) tried to access %s but lacks access", + request.env.user.login, + request.env.user.name, + pr_id.display_name, + ) raise werkzeug.exceptions.NotFound() + if png: + return raster_render(pr_id) + st = {} if pr_id.statuses: # normalise `statuses` to map to a dict @@ -80,3 +133,425 @@ class MergebotDashboard(Controller): 'merged_head': json.loads(pr_id.commits_map).get(''), 'statuses': st }) + +def raster_render(pr): + default_headers = { + 'Content-Type': 'image/png', + 'Last-Modified': formatdate(), + # - anyone can cache the image, so public + # - crons run about every minute so that's how long a request is fresh + # - if the mergebot can't be contacted, allow using the stale response (no must-revalidate) + # - intermediate caches can recompress the PNG if they want (pillow is not a very good PNG generator) + # - the response is mutable even during freshness, technically (as there + # is no guarantee the freshness window lines up with the cron, plus + # some events are not cron-based) + # - maybe don't allow serving the stale image *while* revalidating? + # - allow serving a stale image for a day if the server returns 500 + 'Cache-Control': 'public, max-age=60, stale-if-error=86400', + } + if if_none_match := request.httprequest.headers.get('If-None-Match'): + # just copy the existing value out if we received any + default_headers['ETag'] = if_none_match + + # weak validation: check the latest modification date of all objects involved + project, repos, branches, genealogy = pr.env.ref('runbot_merge.dashboard-pre')\ + ._run_action_code_multi({'pr': pr}) + + # last-modified should be in RFC2822 format, which is what + # email.utils.formatdate does (sadly takes a timestamp but...) 
+ last_modified = formatdate(max(( + o.write_date + for o in chain( + project, + repos, + branches, + genealogy, + genealogy.all_prs | pr, + ) + )).timestamp()) + # The (304) response must not contain a body and must include the headers + # that would have been sent in an equivalent 200 OK response + headers = {**default_headers, 'Last-Modified': last_modified} + if request.httprequest.headers.get('If-Modified-Since') == last_modified: + return werkzeug.wrappers.Response(status=304, headers=headers) + + batches = pr.env.ref('runbot_merge.dashboard-prep')._run_action_code_multi({ + 'pr': pr, + 'repos': repos, + 'branches': branches, + 'genealogy': genealogy, + }) + + etag = hashlib.sha256(f"(P){pr.id},{pr.repository.id},{pr.target.id},{pr.batch_id.blocked}".encode()) + # repos and branches should be in a consistent order so can just hash that + etag.update(''.join(f'(R){r.name}' for r in repos).encode()) + etag.update(''.join(f'(T){b.name},{b.active}' for b in branches).encode()) + # and product of deterministic iterations should be deterministic + for r, b in product(repos, branches): + ps = batches[r, b] + + etag.update(f"(B){ps['state']},{ps['detached']},{ps['active']}".encode()) + etag.update(''.join( + f"(PS){p['label']},{p['closed']},{p['number']},{p['checked']},{p['reviewed']},{p['attached']},{p['pr'].staging_id.id}" + for p in ps['prs'] + ).encode()) + + etag = headers['ETag'] = base64.b32encode(etag.digest()).decode() + if if_none_match == etag: + return werkzeug.wrappers.Response(status=304, headers=headers) + + if not pr.batch_id.target: + im = render_inconsistent_batch(pr.batch_id) + else: + im = render_full_table(pr, branches, repos, batches) + + buffer = io.BytesIO() + im.save(buffer, 'png', optimize=True) + return werkzeug.wrappers.Response(buffer.getvalue(), headers=headers) + +class Decoration(Flag): + STRIKETHROUGH = auto() + +@dataclass(frozen=True) +class Text: + content: str + font: ImageFont.FreeTypeFont + color: Color + decoration: Decoration = Decoration(0) + + @cached_property + def width(self) -> int: + return ceil(self.font.getlength(self.content)) + + @property + def height(self) -> int: + return sum(self.font.getmetrics()) + + def draw(self, image: ImageDraw.ImageDraw, left: int, top: int): + image.text((left, top), self.content, fill=self.color, font=self.font) + if Decoration.STRIKETHROUGH in self.decoration: + x1, _, x2, _ = self.font.getbbox(self.content) + _, y1, _, y2 = self.font.getbbox('x') + # put the strikethrough line about 1/3rd down the x (default seems + # to be a bit above halfway down but that's ugly with numbers which + # is most of our stuff) + y = top + y1 + (y2 - y1) / 3 + image.line([(left + x1, y), (left + x2, y)], self.color) + +@dataclass(frozen=True) +class Checkbox: + checked: Optional[bool] + font: ImageFont.FreeTypeFont + color: Color + success: Color + error: Color + + @cached_property + def width(self) -> int: + return ceil(max( + self.font.getlength(BOX_EMPTY), + self.font.getlength(CHECK_MARK), + self.font.getlength(CROSS), + )) + + @property + def height(self): + return sum(self.font.getmetrics()) + + def draw(self, image: ImageDraw.ImageDraw, left: int, top: int): + image.text((left, top+5), BOX_EMPTY, fill=self.color, font=self.font) + if self.checked is True: + image.text((left, top+4), CHECK_MARK, fill=self.success, font=self.font) + elif self.checked is False: + image.text((left, top+4), CROSS, fill=self.error, font=self.font) + +@dataclass(frozen=True) +class Line: + spans: List[Text | Checkbox | Lines] + + @property + def 
width(self) -> int: + return sum(s.width for s in self.spans) + + @property + def height(self) -> int: + return max(s.height for s in self.spans) if self.spans else 0 + + def draw(self, image: ImageDraw.ImageDraw, left: int, top: int): + for span in self.spans: + span.draw(image, left, top) + left += span.width + +@dataclass(frozen=True) +class Lines: + lines: List[Line] + + @property + def width(self) -> int: + return max(l.width for l in self.lines) + + @property + def height(self) -> int: + return sum(l.height for l in self.lines) + + def draw(self, image: ImageDraw.ImageDraw, left: int, top: int): + for line in self.lines: + line.draw(image, left, top) + top += line.height + +@dataclass(frozen=True) +class Cell: + content: Lines | Line | Text + background: Color = (255, 255, 255) + attached: bool = True + + @cached_property + def width(self) -> int: + return self.content.width + 2 * HORIZONTAL_PADDING + + @cached_property + def height(self) -> int: + return self.content.height + 2 * VERTICAL_PADDING + + +def render_full_table(pr, branches, repos, batches): + with file_open('web/static/fonts/google/Open_Sans/Open_Sans-Regular.ttf', 'rb') as f: + font = ImageFont.truetype(f, size=16, layout_engine=0) + f.seek(0) + supfont = ImageFont.truetype(f, size=13, layout_engine=0) + with file_open('web/static/fonts/google/Open_Sans/Open_Sans-Bold.ttf', 'rb') as f: + bold = ImageFont.truetype(f, size=16, layout_engine=0) + with file_open('web/static/src/libs/fontawesome/fonts/fontawesome-webfont.ttf', 'rb') as f: + icons = ImageFont.truetype(f, size=16, layout_engine=0) + + rowheights = collections.defaultdict(int) + colwidths = collections.defaultdict(int) + cells = {} + for b in chain([None], branches): + for r in chain([None], repos): + opacity = 1.0 if b is None or b.active else 0.5 + current_row = b == pr.target + background = BG['info'] if current_row or r == pr.repository else BG[None] + + if b is None: # first row + cell = Cell(Text("" if r is None else r.name, bold, TEXT), background) + elif r is None: # first column + cell = Cell(Text(b.name, font, blend(TEXT, opacity, over=background)), background) + elif current_row: + ps = batches[r, b] + bgcolor = lighten(BG[ps['state']], by=-0.05) if pr in ps['pr_ids'] else BG[ps['state']] + background = blend(bgcolor, opacity, over=background) + foreground = blend((39, 110, 114), opacity, over=background) + success = blend(SUCCESS, opacity, over=background) + error = blend(ERROR, opacity, over=background) + + boxes = { + False: Checkbox(False, icons, foreground, success, error), + True: Checkbox(True, icons, foreground, success, error), + None: Checkbox(None, icons, foreground, success, error), + } + prs = [] + attached = True + for p in ps['prs']: + pr = p['pr'] + attached = attached and p['attached'] + + if pr.staging_id: + sub = ": is staged" + elif pr.error: + sub = ": staging failed" + else: + sub = "" + + lines = [ + Line([Text( + f"#{p['number']}{sub}", + font, + foreground, + decoration=Decoration.STRIKETHROUGH if p['closed'] else Decoration(0), + )]), + ] + + # no need for details if closed or in error + if pr.state not in ('merged', 'closed', 'error') and not pr.staging_id: + if pr.draft: + lines.append(Line([boxes[False], Text("is in draft", font, error)])) + lines.extend([ + Line([ + boxes[bool(pr.squash or pr.merge_method)], + Text( + "merge method: {}".format('single' if pr.squash else (pr.merge_method or 'missing')), + font, + foreground if pr.squash or pr.merge_method else error, + ), + ]), + Line([ + boxes[bool(pr.reviewed_by)], + 
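# review line: a green check once the PR has been r+'d, a red cross otherwise
+                            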
Text( + "Reviewed" if pr.reviewed_by else "Not Reviewed", + font, + foreground if pr.reviewed_by else error, + ) + ]), + Line([ + boxes[pr.batch_id.skipchecks or pr.status == 'success'], + Text("CI", font, foreground if pr.batch_id.skipchecks or pr.status == 'success' else error), + ]), + ]) + if not pr.batch_id.skipchecks: + statuses = json.loads(pr.statuses_full) + for ci in pr.repository.status_ids._for_pr(pr): + st = (statuses.get(ci.context.strip()) or {'state': 'pending'})['state'] + color = foreground + if st in ('error', 'failure'): + color = error + box = boxes[False] + elif st == 'success': + box = boxes[True] + else: + box = boxes[None] + + lines.append(Line([ + Text(" - ", font, color), + box, + Text(f"{ci.repo_id.name}: {ci.context}", font, color) + ])) + prs.append(Lines(lines)) + cell = Cell(Line(prs), background, attached) + else: + ps = batches[r, b] + bgcolor = lighten(BG[ps['state']], by=-0.05) if pr in ps['pr_ids'] else BG[ps['state']] + background = blend(bgcolor, opacity, over=background) + foreground = blend((39, 110, 114), opacity, over=background) + + line = [] + attached = True + for p in ps['prs']: + line.append(Text( + f"#{p['number']}", + font, + foreground, + decoration=Decoration.STRIKETHROUGH if p['closed'] else Decoration(0), + )) + attached = attached and p['attached'] + for attribute in filter(None, [ + 'error' if p['pr'].error else '', + '' if p['checked'] else 'missing statuses', + '' if p['reviewed'] else 'missing r+', + '' if p['attached'] else 'detached', + 'staged' if p['pr'].staging_id else 'ready' if p['pr']._ready else '' + ]): + color = SUCCESS if attribute in ('staged', 'ready') else ERROR + line.append(Text(f' {attribute}', supfont, blend(color, opacity, over=background))) + line.append(Text(" ", font, foreground)) + cell = Cell(Line(line), background, attached) + + cells[r, b] = cell + rowheights[b] = max(rowheights[b], cell.height) + colwidths[r] = max(colwidths[r], cell.width) + + im = Image.new("RGB", (sum(colwidths.values()), sum(rowheights.values())), "white") + # no need to set the font here because every text element has its own + draw = ImageDraw.Draw(im, 'RGB') + top = 0 + for b in chain([None], branches): + left = 0 + for r in chain([None], repos): + cell = cells[r, b] + + # for a given cell, we first print the background, then the text, then + # the borders + # need to subtract 1 because pillow uses inclusive rect coordinates + right = left + colwidths[r] - 1 + bottom = top + rowheights[b] - 1 + draw.rectangle( + (left, top, right, bottom), + cell.background, + ) + # draw content adding padding + cell.content.draw(draw, left=left + HORIZONTAL_PADDING, top=top + VERTICAL_PADDING) + # draw bottom-right border + draw.line([ + (left, bottom), + (right, bottom), + (right, top), + ], fill=(172, 176, 170)) + if not cell.attached: + # overdraw previous cell's bottom border + draw.line([(left, top-1), (right-1, top-1)], fill=ERROR) + + left += colwidths[r] + top += rowheights[b] + + return im + + +def render_inconsistent_batch(batch): + """If a batch has inconsistent targets, just point out the inconsistency by + listing the PR and targets + """ + with file_open('web/static/fonts/google/Open_Sans/Open_Sans-Regular.ttf', 'rb') as f: + font = ImageFont.truetype(f, size=16, layout_engine=0) + + im = Image.new("RGB", (4000, 4000), color=BG['danger']) + w = h = 0 + def draw(label, draw=ImageDraw.Draw(im)): + nonlocal w, h + + draw.text((0, h), label, fill=blend(ERROR, 1.0, over=BG['danger']), font=font) + + _, _, ww, hh = font.getbbox(label) + w 
= max(w, ww)
+        h += hh
+
+    draw(" Inconsistent targets:")
+    for p in batch.prs:
+        draw(f" • {p.display_name} has target '{p.target.name}'")
+    draw(" To resolve, either retarget or close the mis-targeted pull request(s).")
+
+    return im.crop((0, 0, w+10, h+5))
+
+
+
+Color = Tuple[int, int, int]
+TEXT: Color = (102, 102, 102)
+ERROR: Color = (220, 53, 69)
+SUCCESS: Color = (40, 167, 69)
+BG: Mapping[str | None, Color] = collections.defaultdict(lambda: (255, 255, 255), {
+    'info': (217, 237, 247),
+    'success': (223, 240, 216),
+    'warning': (252, 248, 227),
+    'danger': (242, 222, 222),
+})
+
+
+CHECK_MARK = "\uf00c"
+CROSS = "\uf00d"
+BOX_EMPTY = "\uf096"
+
+
+def blend_single(c: int, over: int, opacity: float) -> int:
+    return round(over * (1 - opacity) + c * opacity)
+
+def blend(color: Color, opacity: float, *, over: Color = (255, 255, 255)) -> Color:
+    assert 0.0 <= opacity <= 1.0
+    return (
+        blend_single(color[0], over[0], opacity),
+        blend_single(color[1], over[1], opacity),
+        blend_single(color[2], over[2], opacity),
+    )
+
+def lighten(color: Color, *, by: float) -> Color:
+    # colorsys uses values in the range [0, 1] rather than pillow/CSS-style [0, 255]
+    r, g, b = tuple(c / 255 for c in color)
+    hue, lightness, saturation = colorsys.rgb_to_hls(r, g, b)
+
+    # by% of the way between value and 1.0
+    if by >= 0: lightness += (1.0 - lightness) * by
+    # -by% of the way between 0 and value
+    else: lightness *= (1.0 + by)
+
+    return cast(Color, tuple(
+        round(c * 255)
+        for c in colorsys.hls_to_rgb(hue, lightness, saturation)
+    ))
diff --git a/runbot_merge/controllers/reviewer_provisioning.py b/runbot_merge/controllers/reviewer_provisioning.py
index 1b7a9f2f..35f07d24 100644
--- a/runbot_merge/controllers/reviewer_provisioning.py
+++ b/runbot_merge/controllers/reviewer_provisioning.py
@@ -1,17 +1,18 @@
 # -*- coding: utf-8 -*-
 import logging
 
+from odoo import Command
 from odoo.http import Controller, request, route
 
 try:
     from odoo.addons.saas_worker.util import from_role
 except ImportError:
-    def from_role(_):
+    def from_role(*_, **__):
         return lambda _: None
 
 _logger = logging.getLogger(__name__)
 
 class MergebotReviewerProvisioning(Controller):
-    @from_role('accounts')
+    @from_role('accounts', signed=True)
     @route('/runbot_merge/users', type='json', auth='public')
     def list_users(self):
         env = request.env(su=True)
@@ -23,7 +24,7 @@ class MergebotReviewerProvisioning(Controller):
             if u.github_login
         ]
 
-    @from_role('accounts')
+    @from_role('accounts', signed=True)
     @route('/runbot_merge/provision', type='json', auth='public')
     def provision_user(self, users):
         _logger.info('Provisioning %s users: %s.', len(users), ', '.join(map(
@@ -34,7 +35,12 @@ class MergebotReviewerProvisioning(Controller):
         Partners = env['res.partner']
         Users = env['res.users']
 
-        existing_partners = Partners.search([
+        existing_logins = set()
+        existing_oauth = set()
+        for u in Users.with_context(active_test=False).search([]):
+            existing_logins.add(u.login)
+            existing_oauth.add((u.oauth_provider_id.id, u.oauth_uid))
+        existing_partners = Partners.with_context(active_test=False).search([
             '|', ('email', 'in', [u['email'] for u in users]),
             ('github_login', 'in', [u['github_login'] for u in users])
         ])
@@ -55,29 +61,54 @@ class MergebotReviewerProvisioning(Controller):
         if p.github_login:
             # assume there can't be an existing one because github_login is
             # unique, and should not be able to collide with emails
-            partners[p.github_login] = p
+            partners[p.github_login.casefold()] = p
 
+        portal = env.ref('base.group_portal')
         internal = 
env.ref('base.group_user') odoo_provider = env.ref('auth_oauth.provider_openerp') to_create = [] - created = updated = 0 + updated = 0 + to_activate = Partners for new in users: if 'sub' in new: new['oauth_provider_id'] = odoo_provider.id new['oauth_uid'] = new.pop('sub') # prioritise by github_login as that's the unique-est point of information - current = partners.get(new['github_login']) or partners.get(new['email']) or Partners + current = partners.get(new['github_login'].casefold()) or partners.get(new['email']) or Partners + if not current.active: + to_activate |= current # entry doesn't have user -> create user if not current.user_ids: - # skip users without an email (= login) as that - # fails if not new['email']: + _logger.info( + "Unable to create user for %s: no email in provisioning data", + current.display_name + ) + continue + if 'oauth_uid' in new: + if (new['oauth_provider_id'], new['oauth_uid']) in existing_oauth: + _logger.warning( + "Attempted to create user with duplicate oauth uid " + "%s with provider %r for provisioning entry %r. " + "There is likely a duplicate partner (one version " + "with email, one with github login)", + new['oauth_uid'], odoo_provider.display_name, new, + ) + continue + if new['email'] in existing_logins: + _logger.warning( + "Attempted to create user with duplicate login %s for " + "provisioning entry %r. There is likely a duplicate " + "partner (one version with email, one with github " + "login)", + new['email'], new, + ) continue new['login'] = new['email'] - new['groups_id'] = [(4, internal.id)] + new['groups_id'] = [Command.link(internal.id)] # entry has partner -> create user linked to existing partner # (and update partner implicitly) if current: @@ -88,26 +119,36 @@ class MergebotReviewerProvisioning(Controller): # otherwise update user (if there is anything to update) user = current.user_ids if len(user) != 1: - _logger.warning("Got %d users for partner %s.", len(user), current.display_name) + _logger.warning("Got %d users for partner %s, updating first.", len(user), current.display_name) user = user[:1] + new.setdefault("active", True) update_vals = { k: v for k, v in new.items() - if v not in ('login', 'email') if v != (user[k] if k != 'oauth_provider_id' else user[k].id) } + if user.has_group('base.group_portal'): + update_vals['groups_id'] = [ + Command.unlink(portal.id), + Command.link(internal.id), + ] + if update_vals: user.write(update_vals) updated += 1 + + created = len(to_create) if to_create: # only create 100 users at a time to avoid request timeout - Users.create(to_create[:100]) - created = len(to_create[:100]) + Users.create(to_create) + + if to_activate: + to_activate.active = True _logger.info("Provisioning: created %d updated %d.", created, updated) return [created, updated] - @from_role('accounts') + @from_role('accounts', signed=True) @route(['/runbot_merge/get_reviewers'], type='json', auth='public') def fetch_reviewers(self, **kwargs): reviewers = request.env['res.partner.review'].sudo().search([ @@ -115,17 +156,18 @@ class MergebotReviewerProvisioning(Controller): ]).mapped('partner_id.github_login') return reviewers - @from_role('accounts') + @from_role('accounts', signed=True) @route(['/runbot_merge/remove_reviewers'], type='json', auth='public', methods=['POST']) def update_reviewers(self, github_logins, **kwargs): partners = request.env['res.partner'].sudo().search([('github_login', 'in', github_logins)]) partners.write({ - 'review_rights': [(5, 0, 0)], - 'delegate_reviewer': [(5, 0, 0)], + 'email': False, 
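+            # Command.clear() empties the relation outright, the new-style
+            # spelling of the old (5, 0, 0) triplet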
+ 'review_rights': [Command.clear()], + 'delegate_reviewer': [Command.clear()], }) # Assign the linked users as portal users partners.mapped('user_ids').write({ - 'groups_id': [(6, 0, [request.env.ref('base.group_portal').id])] + 'groups_id': [Command.set([request.env.ref('base.group_portal').id])] }) return True diff --git a/runbot_merge/data/merge_cron.xml b/runbot_merge/data/merge_cron.xml index f8736785..aa9de63e 100644 --- a/runbot_merge/data/merge_cron.xml +++ b/runbot_merge/data/merge_cron.xml @@ -4,30 +4,33 @@ <field name="model_id" ref="model_runbot_merge_project"/> <field name="state">code</field> <field name="code">model._check_stagings(True)</field> - <field name="interval_number">1</field> - <field name="interval_type">minutes</field> + <field name="interval_number">6</field> + <field name="interval_type">hours</field> <field name="numbercall">-1</field> <field name="doall" eval="False"/> + <field name="priority">30</field> </record> <record model="ir.cron" id="staging_cron"> <field name="name">Check for progress of PRs and create Stagings</field> <field name="model_id" ref="model_runbot_merge_project"/> <field name="state">code</field> <field name="code">model._create_stagings(True)</field> - <field name="interval_number">1</field> - <field name="interval_type">minutes</field> + <field name="interval_number">6</field> + <field name="interval_type">hours</field> <field name="numbercall">-1</field> <field name="doall" eval="False"/> + <field name="priority">40</field> </record> <record model="ir.cron" id="feedback_cron"> <field name="name">Send feedback to PR</field> <field name="model_id" ref="model_runbot_merge_pull_requests_feedback"/> <field name="state">code</field> <field name="code">model._send()</field> - <field name="interval_number">1</field> - <field name="interval_type">minutes</field> + <field name="interval_number">6</field> + <field name="interval_type">hours</field> <field name="numbercall">-1</field> <field name="doall" eval="False"/> + <field name="priority">60</field> </record> <record model="ir.cron" id="labels_cron"> <field name="name">Update labels on PR</field> @@ -38,16 +41,18 @@ <field name="interval_type">minutes</field> <field name="numbercall">-1</field> <field name="doall" eval="False"/> + <field name="priority">70</field> </record> <record model="ir.cron" id="fetch_prs_cron"> <field name="name">Check for PRs to fetch</field> <field name="model_id" ref="model_runbot_merge_fetch_job"/> <field name="state">code</field> <field name="code">model._check(True)</field> - <field name="interval_number">1</field> - <field name="interval_type">minutes</field> + <field name="interval_number">6</field> + <field name="interval_type">hours</field> <field name="numbercall">-1</field> <field name="doall" eval="False"/> + <field name="priority">10</field> </record> <record model="ir.cron" id="check_linked_prs_status"> <field name="name">Warn on linked PRs where only one is ready</field> @@ -58,15 +63,17 @@ <field name="interval_type">hours</field> <field name="numbercall">-1</field> <field name="doall" eval="False"/> + <field name="priority">50</field> </record> <record model="ir.cron" id="process_updated_commits"> <field name="name">Impact commit statuses on PRs and stagings</field> <field name="model_id" ref="model_runbot_merge_commit"/> <field name="state">code</field> <field name="code">model._notify()</field> - <field name="interval_number">1</field> - <field name="interval_type">minutes</field> + <field name="interval_number">6</field> + <field 
name="interval_type">hours</field> <field name="numbercall">-1</field> <field name="doall" eval="False"/> + <field name="priority">20</field> </record> </odoo> diff --git a/runbot_merge/data/runbot_merge.pull_requests.feedback.template.csv b/runbot_merge/data/runbot_merge.pull_requests.feedback.template.csv new file mode 100644 index 00000000..eab2a241 --- /dev/null +++ b/runbot_merge/data/runbot_merge.pull_requests.feedback.template.csv @@ -0,0 +1,174 @@ +id,template,help +runbot_merge.handle.branch.unmanaged,"This PR targets the un-managed branch {repository}:{branch}, it needs to be retargeted before it can be merged.","Notifies of event on PR whose branch is not managed by the mergebot. + +repository: repository name +branch: branch (ref) name +event: complete pr event" +runbot_merge.handle.branch.inactive,"This PR targets the disabled branch {repository}:{branch}, it needs to be retargeted before it can be merged.","Notifies of event on PR whose branch is deactivated. + +repository: repository name +branch: branch (ref) name +event: complete pr event" +runbot_merge.handle.pr.merged,@{event[sender][login]} ya silly goose you can't reopen a merged PR.,"Notifies that a user tried to reopen a merged PR. + +Event: complete PR event" +runbot_merge.pr.load.unmanaged,"Branch `{pr[base][ref]}` is not within my remit, imma just ignore it.","Notifies that a user tried to load a PR targeting a non-handled branch. + +pr: pull request (github object) +Repository: repository object (???)" +runbot_merge.pr.load.fetched,"{pr.ping}I didn't know about this PR and had to retrieve its information, you may have to re-approve it as I didn't see previous commands.","Notifies that we did retrieve an unknown PR (either by request or as side effect of an interaction). + +Pr: pr object we just created" +runbot_merge.pr.branch.disabled,"{pr.ping}the target branch {pr.target.name!r} has been disabled, you may want to close this PR.","Notifies that the target branch for this PR was deactivated. + +pr: pull request in question" +runbot_merge.pr.merge.failed,{pr.ping}unable to stage: {reason},"Notifies that the PR could not be merged into the staging branch. + +pr: pr object we tried to merge +reason: error message +exc: exception object" +runbot_merge.pr.fetch.unmanaged,I'm sorry. Branch `{branch}` is not within my remit.,"Responds to a request to fetch a PR to an unmanaged branch. + +repository: pr repository +branch: target branch +number: pr number" +runbot_merge.command.access.no,"I'm sorry, @{user}. I'm afraid I can't do that.","Responds to command by a user who has no rights at all. + +user: github login of comment sender +pr: pr object to which the command was sent" +runbot_merge.command.approve.failure,@{user} you may want to rebuild or fix this PR as it has failed CI.,"Responds to r+ of PR with failed CI. + +user: github login of comment sender +pr: pr object to which the command was sent" +runbot_merge.command.unapprove.p0,"Skipchecks removed due to r-.","Responds to r- of pr in skipchecks. + +user: github login of comment sender +pr: pr object to which the command was sent" +runbot_merge.command.method,Merge method set to {new_method}.,"Responds to the setting of the merge method. + +new_method: ... +pr: pr object to which the command was sent +user: github login of the comment sender" +runbot_merge.failure.approved,{pr.ping}{status!r} failed on this reviewed PR.,"Notification of failed status on a reviewed PR. 
+
+pr: pull request in question
+status: failed status"
+runbot_merge.pr.created,[![Pull request status dashboard]({pr.url}.png)]({pr.url}),"Initial comment on PR creation.
+
+pr: created pr"
+runbot_merge.pr.linked.not_ready,{pr.ping}linked pull request(s) {siblings} not ready. Linked PRs are not staged until all of them are ready.,"Comment when a PR is ready (approved & validated) but it is linked to other PRs which are not.
+
+pr: pr we're looking at
+siblings: its siblings, as a single comma-separated list of PR links"
+runbot_merge.pr.merge_method,"{pr.ping}because this PR has multiple commits, I need to know how to merge it:
+
+{methods}","Comment when a PR is ready but doesn't have a merge method set
+
+pr: the pr we can't stage
+methods: a markdown-formatted list of valid merge methods"
+runbot_merge.pr.staging.mismatch,"{pr.ping}we apparently missed updates to this PR and tried to stage it in a state which might not have been approved.
+
+The properties {mismatch} were not correctly synchronized and have been updated.
+
+<details><summary>differences</summary>
+
+```diff
+{diff}```
+</details>
+
+Note that we are unable to check the properties {unchecked}.
+
+Please check and re-approve.
+","Comment when staging was attempted but a sanity check revealed the github state and the mergebot state differ.
+
+pr: the pr we tried to stage
+mismatch: comma separated list of mismatched property names
+diff: patch-style view of the differing properties
+unchecked: comma-separated list of properties which can't be checked"
+runbot_merge.pr.staging.fail,{pr.ping}staging failed: {message},"Comment when a PR caused a staging to fail (normally only sent if the staging has a single batch, may be sent on multiple PRs depending on whether the heuristic to guess the problematic PR of a batch succeeded)
+
+pr: the pr
+message: staging failure information (error message, build link, etc...)"
+runbot_merge.forwardport.updates.closed,"{pr.ping}ancestor PR {parent.display_name} has been updated but this PR is {pr.state} and can't be updated to match.
+
+You may want or need to manually update any followup PR.","Comment when a PR is updated and one of its followups is already merged or closed. Sent to the followup.
+
+pr: the closed or merged PR
+parent: the modified ancestor PR"
+runbot_merge.forwardport.updates.conflict.parent,"{pr.ping}WARNING: the latest change ({pr.head}) triggered a conflict when updating the next forward-port ({next.display_name}), and has been ignored.
+
+You will need to update this pull request differently, or fix the issue by hand on {next.display_name}.","Comment when a PR update triggers a conflict in a child.
+
+pr: updated parent PR
+next: child PR in conflict"
+runbot_merge.forwardport.updates.conflict.child,"{pr.ping}WARNING: the update of {previous.display_name} to {previous.head} has caused a conflict in this pull request, data may have been lost.{stdout}{stderr}","Comment when a PR update followup is in conflict.
+
+pr: PR where update followup conflict happened
+previous: parent PR which triggered the followup
+stdout: markdown-formatted stdout of git, if any
+stderr: markdown-formatted stderr of git, if any"
+runbot_merge.forwardport.update.detached,{pr.ping}this PR was modified / updated and has become a normal PR. It must be merged directly.,"Comment when a forwardport PR gets updated, documents that the PR now needs to be merged the “normal” way.
+ +pr: the pr in question " +runbot_merge.forwardport.update.parent,{pr.ping}child PR {child.display_name} was modified / updated and has become a normal PR. This PR (and any of its parents) will need to be merged independently as approvals won't cross.,"Sent to an open PR when its direct child has been detached. + +pr: the pr +child: its detached child" +runbot_merge.forwardport.ci.failed,{pr.ping}{ci} failed on this forward-port PR,"Comment when CI fails on a forward-port PR (which thus won't port any further, for now). + +pr: the pr in question +ci: the failed status" +runbot_merge.forwardport.failure.discrepancy,{pr.ping}this pull request can not be forward-ported: next branch is {next!r} but linked pull request {linked.display_name} has a next branch {other!r}.,"Comment when we tried to forward port a PR batch, but the PRs have different next targets (unlikely to happen really). + +pr: the pr we tried to forward port +linked: the linked PR with a different next target +next: next target for the current pr +other: next target for the other pr" +runbot_merge.forwardport.failure.conflict,"{pr.ping}the next pull request ({new.display_name}) is in conflict. You can merge the chain up to here by saying +> @{pr.repository.project_id.github_prefix} r+ +{footer}","Comment when a forward port was created but is in conflict, warns of that & gives instructions for current PR. + +pr: the pr which was just forward ported +new: the new forward-port +footer: some footer text" +runbot_merge.forwardport.reminder,{pr.ping}this forward port of {source.display_name} is awaiting action (not merged or closed).,"Comment when a forward port has outstanding (not merged or closed) descendants + +pr: the forward-port +source: the source PR" +runbot_merge.forwardport.failure,"{pr.ping}cherrypicking of pull request {pr.source_id.display_name} failed. +{commits}{stdout}{stderr} +Either perform the forward-port manually (and push to this branch, proceeding as usual) or close this PR (maybe?). + +In the former case, you may want to edit this PR message as well. + +:warning: after resolving this conflict, you will need to merge it via @{pr.repository.project_id.github_prefix}. +{footer}","Comment when a forward-port failed. + +pr: the new pr (in failure) +commits: markdown-formatted list of source commits, indicating which failed +stdout: git's stdout +stderr: git's stderr +footer: some footer text" +runbot_merge.forwardport.linked,"{pr.ping}while this was properly forward-ported, at least one co-dependent PR ({siblings}) did not succeed. You will need to fix it before this can be merged. + +Both this PR and the others will need to be approved via `@{pr.repository.project_id.github_prefix} r+` as they are all considered “in conflict”. +{footer} ","Comment when a forward port succeeded but at least one sibling failed. + +pr: the current pr (new) +siblings: comma-separated list of sibling links +footer: some footer text" +runbot_merge.forwardport.final,"{pr.ping}this PR targets {pr.target.name} and is the last of the forward-port chain{containing} +{ancestors} +To merge the full chain, use +> @{pr.repository.project_id.github_prefix} r+ +{footer}","Comment when a forward port was created and is the last of a sequence (target the limit branch). 
+
+pr: the new forward port
+containing: label changing depending on whether there are ancestors to merge
+ancestors: markdown formatted list of parent PRs which can be approved as part of the chain
+footer: a footer"
+runbot_merge.forwardport.intermediate,"This PR targets {pr.target.name} and is part of the forward-port chain. Further PRs will be created up to {pr.limit_pretty}.
+{footer}","Comment when a forward port was successfully created but is not the last of the line.
+
+pr: the new forward port
+footer: a footer"
diff --git a/runbot_merge/git.py b/runbot_merge/git.py
new file mode 100644
index 00000000..caaa6e5d
--- /dev/null
+++ b/runbot_merge/git.py
@@ -0,0 +1,272 @@
+import dataclasses
+import itertools
+import logging
+import os
+import pathlib
+import resource
+import stat
+import subprocess
+from typing import Optional, TypeVar, Union, Sequence, Tuple, Dict
+
+from odoo.tools.appdirs import user_cache_dir
+from .github import MergeError, PrCommit
+
+_logger = logging.getLogger(__name__)
+
+def source_url(repository) -> str:
+    return 'https://{}@github.com/{}'.format(
+        repository.project_id.github_token,
+        repository.name,
+    )
+
+def fw_url(repository) -> str:
+    return 'https://{}@github.com/{}'.format(
+        repository.project_id.fp_github_token,
+        repository.fp_remote_target,
+    )
+
+Authorship = Union[Tuple[str, str], Tuple[str, str, str]]
+
+def get_local(repository, *, clone: bool = True) -> 'Optional[Repo]':
+    repos_dir = pathlib.Path(user_cache_dir('mergebot'))
+    repos_dir.mkdir(parents=True, exist_ok=True)
+    # NB: `repository.name` is `$org/$name` so this will be a subdirectory, probably
+    repo_dir = repos_dir / repository.name
+
+    if repo_dir.is_dir():
+        return git(repo_dir)
+    elif clone:
+        _logger.info("Cloning out %s to %s", repository.name, repo_dir)
+        subprocess.run(['git', 'clone', '--bare', source_url(repository), str(repo_dir)], check=True)
+        # bare repos don't have fetch specs by default, and fetching *into*
+        # them is a pain in the ass, configure fetch specs so `git fetch`
+        # works properly
+        repo = git(repo_dir)
+        repo.config('--add', 'remote.origin.fetch', '+refs/heads/*:refs/heads/*')
+        # negative refspecs require git 2.29
+        repo.config('--add', 'remote.origin.fetch', '^refs/heads/tmp.*')
+        repo.config('--add', 'remote.origin.fetch', '^refs/heads/staging.*')
+        return repo
+    else:
+        _logger.warning(
+            "Unable to acquire %s: %s",
+            repo_dir,
+            "doesn't exist" if not repo_dir.exists()\
+                else oct(stat.S_IFMT(repo_dir.stat().st_mode))
+        )
+        return None
+
+
+ALWAYS = ('gc.auto=0', 'maintenance.auto=0')
+
+
+def _bypass_limits():
+    resource.setrlimit(resource.RLIMIT_AS, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
+
+
+def git(directory: str) -> 'Repo':
+    return Repo(directory, check=True)
+
+
+Self = TypeVar("Self", bound="Repo")
+class Repo:
+    def __init__(self, directory, **config) -> None:
+        self._directory = str(directory)
+        config.setdefault('stderr', subprocess.PIPE)
+        self._config = config
+        self._params = ()
+
+    def __getattr__(self, name: str) -> 'GitCommand':
+        return GitCommand(self, name.replace('_', '-'))
+
+    def _run(self, *args, **kwargs) -> subprocess.CompletedProcess:
+        opts = {**self._config, **kwargs}
+        args = ('git', '-C', self._directory)\
+            + tuple(itertools.chain.from_iterable(('-c', p) for p in self._params + ALWAYS))\
+            + args
+        try:
+            return subprocess.run(args, preexec_fn=_bypass_limits, **opts)
+        except subprocess.CalledProcessError as e:
+            stream = e.stderr or e.stdout
+            if stream:
+                _logger.error("git call error: %s", stream)
+            
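# re-raise so callers still see the failure once it has been logged
+            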
raise + + def stdout(self, flag: bool = True) -> Self: + if flag is True: + return self.with_config(stdout=subprocess.PIPE) + elif flag is False: + return self.with_config(stdout=None) + return self.with_config(stdout=flag) + + def check(self, flag: bool) -> Self: + return self.with_config(check=flag) + + def with_config(self, **kw) -> Self: + opts = {**self._config, **kw} + r = Repo(self._directory, **opts) + r._params = self._params + return r + + def with_params(self, *args) -> Self: + r = self.with_config() + r._params = args + return r + + def clone(self, to: str, branch: Optional[str] = None) -> Self: + self._run( + 'clone', + *([] if branch is None else ['-b', branch]), + self._directory, to, + ) + return Repo(to) + + def get_tree(self, commit_hash: str) -> str: + r = self.with_config(check=True).rev_parse(f'{commit_hash}^{{tree}}') + + return r.stdout.strip() + + def rebase(self, dest: str, commits: Sequence[PrCommit]) -> Tuple[str, Dict[str, str]]: + """Implements rebase by hand atop plumbing so: + + - we can work without a working copy + - we can track individual commits (and store the mapping) + + It looks like `--merge-base` is not sufficient for `merge-tree` to + correctly keep track of history, so it loses contents. Therefore + implement in two passes as in the github version. + """ + repo = self.stdout().with_config(text=True, check=False) + + logger = _logger.getChild('rebase') + if not commits: + raise MergeError("PR has no commits") + + prev_tree = repo.get_tree(dest) + prev_original_tree = repo.get_tree(commits[0]['parents'][0]["sha"]) + + new_trees = [] + parent = dest + for original in commits: + if len(original['parents']) != 1: + raise MergeError( + f"commits with multiple parents ({original['sha']}) can not be rebased, " + "either fix the branch to remove merges or merge without " + "rebasing") + + new_trees.append(check(repo.merge_tree(parent, original['sha'])).stdout.strip()) + # allow merging empty commits, but not empty*ing* commits while merging + if prev_original_tree != original['commit']['tree']['sha']: + if new_trees[-1] == prev_tree: + raise MergeError( + f"commit {original['sha']} results in an empty tree when " + f"merged, it is likely a duplicate of a merged commit, " + f"rebase and remove." 
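+                    # (the rebased tree is identical to its parent's: this commit would add nothing)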
+ ) + + parent = check(repo.commit_tree( + tree=new_trees[-1], + parents=[parent, original['sha']], + message=f'temp rebase {original["sha"]}', + )).stdout.strip() + prev_tree = new_trees[-1] + prev_original_tree = original['commit']['tree']['sha'] + + mapping = {} + for original, tree in zip(commits, new_trees): + authorship = check(repo.show('--no-patch', '--pretty=%an%n%ae%n%ai%n%cn%n%ce', original['sha'])) + author_name, author_email, author_date, committer_name, committer_email =\ + authorship.stdout.splitlines() + + c = check(repo.commit_tree( + tree=tree, + parents=[dest], + message=original['commit']['message'], + author=(author_name, author_email, author_date), + committer=(committer_name, committer_email), + )).stdout.strip() + + logger.debug('copied %s to %s (parent: %s)', original['sha'], c, dest) + dest = mapping[original['sha']] = c + + return dest, mapping + + def merge(self, c1: str, c2: str, msg: str, *, author: Tuple[str, str]) -> str: + repo = self.stdout().with_config(text=True, check=False) + + t = repo.merge_tree(c1, c2) + if t.returncode: + raise MergeError(t.stderr) + + c = self.commit_tree( + tree=t.stdout.strip(), + message=msg, + parents=[c1, c2], + author=author, + ) + if c.returncode: + raise MergeError(c.stderr) + return c.stdout.strip() + + def commit_tree( + self, *, tree: str, message: str, + parents: Sequence[str] = (), + author: Optional[Authorship] = None, + committer: Optional[Authorship] = None, + ) -> subprocess.CompletedProcess: + authorship = {} + if author: + authorship['GIT_AUTHOR_NAME'] = author[0] + authorship['GIT_AUTHOR_EMAIL'] = author[1] + if len(author) > 2: + authorship['GIT_AUTHOR_DATE'] = author[2] + if committer: + authorship['GIT_COMMITTER_NAME'] = committer[0] + authorship['GIT_COMMITTER_EMAIL'] = committer[1] + if len(committer) > 2: + authorship['GIT_COMMITTER_DATE'] = committer[2] + + return self.with_config( + input=message, + stdout=subprocess.PIPE, + text=True, + env={ + **os.environ, + **authorship, + # we don't want git to use the timezone of the machine it's + # running on: previously it used the timezone configured in + # github (?), which I think / assume defaults to a generic UTC + 'TZ': 'UTC', + } + )._run( + 'commit-tree', + tree, + '-F', '-', + *itertools.chain.from_iterable(('-p', p) for p in parents), + ) + +def check(p: subprocess.CompletedProcess) -> subprocess.CompletedProcess: + if not p.returncode: + return p + + _logger.info("rebase failed at %s\nstdout:\n%s\nstderr:\n%s", p.args, p.stdout, p.stderr) + raise MergeError(p.stderr or 'merge conflict') + + +@dataclasses.dataclass +class GitCommand: + repo: Repo + name: str + + def __call__(self, *args, **kwargs) -> subprocess.CompletedProcess: + return self.repo._run(self.name, *args, *self._to_options(kwargs)) + + def _to_options(self, d): + for k, v in d.items(): + if len(k) == 1: + yield '-' + k + else: + yield '--' + k.replace('_', '-') + if v not in (None, True): + assert v is not False + yield str(v) diff --git a/runbot_merge/github.py b/runbot_merge/github.py index 27cf6305..98d2e4a3 100644 --- a/runbot_merge/github.py +++ b/runbot_merge/github.py @@ -1,13 +1,14 @@ import collections.abc import itertools -import json as json_ +import json import logging import logging.handlers import os import pathlib import pprint -import textwrap +import time import unicodedata +from typing import Iterable, List, TypedDict, Literal import requests import werkzeug.urls @@ -47,7 +48,47 @@ def _init_gh_logger(): if odoo.netsvc._logger_init: _init_gh_logger() -GH_LOG_PATTERN = 
"""=> {method} /{self._repo}/{path}{qs}{body} +SimpleUser = TypedDict('SimpleUser', { + 'login': str, + 'url': str, + 'type': Literal['User', 'Organization'], +}) +Authorship = TypedDict('Authorship', { + 'name': str, + 'email': str, +}) +CommitTree = TypedDict('CommitTree', { + 'sha': str, + 'url': str, +}) +Commit = TypedDict('Commit', { + 'tree': CommitTree, + 'url': str, + 'message': str, + # optional when creating a commit + 'author': Authorship, + 'committer': Authorship, + 'comments_count': int, +}) +CommitLink = TypedDict('CommitLink', { + 'html_url': str, + 'sha': str, + 'url': str, +}) +PrCommit = TypedDict('PrCommit', { + 'url': str, + 'sha': str, + 'commit': Commit, + # optional when creating a commit (in which case it uses the current user) + 'author': SimpleUser, + 'committer': SimpleUser, + 'parents': List[CommitLink], + # not actually true but we're smuggling stuff via that key + 'new_tree': str, +}) + + +GH_LOG_PATTERN = """=> {method} {path}{qs}{body} <= {r.status_code} {r.reason} {headers} @@ -58,11 +99,12 @@ class GH(object): def __init__(self, token, repo): self._url = 'https://api.github.com' self._repo = repo + self._last_update = 0 session = self._session = requests.Session() session.headers['Authorization'] = 'token {}'.format(token) session.headers['Accept'] = 'application/vnd.github.symmetra-preview+json' - def _log_gh(self, logger, method, path, params, json, response, level=logging.INFO): + def _log_gh(self, logger: logging.Logger, response: requests.Response, level: int = logging.INFO, extra=None): """ Logs a pair of request / response to github, to the specified logger, at the specified level. @@ -70,11 +112,14 @@ class GH(object): bodies, at least in part) so we have as much information as possible for post-mortems. """ - body = body2 = '' + req = response.request + url = werkzeug.urls.url_parse(req.url) + if url.netloc != 'api.github.com': + return - if json: - body = '\n' + textwrap.indent('\t', pprint.pformat(json, indent=4)) + body = '' if not req.body else ('\n' + pprint.pformat(json.loads(req.body.decode()), indent=4)) + body2 = '' if response.content: if _is_json(response): body2 = pprint.pformat(response.json(), depth=4) @@ -87,41 +132,45 @@ class GH(object): ) logger.log(level, GH_LOG_PATTERN.format( - self=self, # requests data - method=method, path=path, - qs='' if not params else ('?' + werkzeug.urls.url_encode(params)), - body=utils.shorten(body.strip(), 400), + method=req.method, path=url.path, qs=url.query, body=body, # response data r=response, headers='\n'.join( '\t%s: %s' % (h, v) for h, v in response.headers.items() ), body2=utils.shorten(body2.strip(), 400) - )) - return body2 + ), extra=extra) def __call__(self, method, path, params=None, json=None, check=True): """ :type check: bool | dict[int:Exception] """ + if method.casefold() != 'get': + to_sleep = 1. 
- (time.time() - self._last_update) + if to_sleep > 0: + time.sleep(to_sleep) + path = f'/repos/{self._repo}/{path}' r = self._session.request(method, self._url + path, params=params, json=json) - self._log_gh(_gh, method, path, params, json, r) + if method.casefold() != 'get': + self._last_update = time.time() + int(r.headers.get('Retry-After', 0)) + + self._log_gh(_gh, r) if check: - if isinstance(check, collections.abc.Mapping): - exc = check.get(r.status_code) - if exc: - raise exc(r.text) - if r.status_code >= 400: - body = self._log_gh( - _logger, method, path, params, json, r, level=logging.ERROR) - if not isinstance(body, (bytes, str)): - raise requests.HTTPError( - json_.dumps(body, indent=4), - response=r - ) - r.raise_for_status() + try: + if isinstance(check, collections.abc.Mapping): + exc = check.get(r.status_code) + if exc: + raise exc(r.text) + if r.status_code >= 400: + raise requests.HTTPError(r.text, response=r) + except Exception: + self._log_gh(_logger, r, level=logging.ERROR, extra={ + 'github-request-id': r.headers.get('x-github-request-id'), + }) + raise + return r def user(self, username): @@ -129,7 +178,7 @@ class GH(object): r.raise_for_status() return r.json() - def head(self, branch): + def head(self, branch: str) -> str: d = utils.backoff( lambda: self('get', 'git/refs/heads/{}'.format(branch)).json(), exc=requests.HTTPError @@ -180,13 +229,17 @@ class GH(object): if r.status_code == 200: head = r.json()['object']['sha'] else: - head = '<Response [%s]: %s)>' % (r.status_code, r.json() if _is_json(r) else r.text) + head = '<Response [%s]: %s)>' % (r.status_code, r.text) if head == to: _logger.debug("Sanity check ref update of %s to %s: ok", branch, to) return - _logger.warning("Sanity check ref update of %s, expected %s got %s", branch, to, head) + _logger.warning( + "Sanity check ref update of %s, expected %s got %s (response-id %s)", + branch, to, head, + r.headers.get('x-github-request-id') + ) return head def fast_forward(self, branch, sha): @@ -200,7 +253,7 @@ class GH(object): raise exceptions.FastForwardError(self._repo) \ from Exception("timeout: never saw %s" % sha) except requests.HTTPError as e: - _logger.debug('fast_forward(%s, %s, %s) -> ERROR', self._repo, branch, sha, exc_info=True) + _logger.debug('fast_forward(%s, %s, %s) -> %s', self._repo, branch, sha, e) if e.response.status_code == 422: try: r = e.response.json() @@ -220,7 +273,7 @@ class GH(object): status0 = r.status_code _logger.debug( - 'ref_set(%s, %s, %s -> %s (%s)', + 'set_ref(%s, %s, %s -> %s (%s)', self._repo, branch, sha, status0, 'OK' if status0 == 200 else r.text or r.reason ) @@ -264,82 +317,6 @@ class GH(object): f"Sanity check ref update of {branch}, expected {sha} got {head}" return status - def merge(self, sha, dest, message): - r = self('post', 'merges', json={ - 'base': dest, - 'head': sha, - 'commit_message': message, - }, check={409: MergeError}) - try: - r = r.json() - except Exception: - raise MergeError("Got non-JSON reponse from github: %s %s (%s)" % (r.status_code, r.reason, r.text)) - _logger.debug( - "merge(%s, %s (%s), %s) -> %s", - self._repo, dest, r['parents'][0]['sha'], - shorten(message), r['sha'] - ) - return dict(r['commit'], sha=r['sha'], parents=r['parents']) - - def rebase(self, pr, dest, reset=False, commits=None): - """ Rebase pr's commits on top of dest, updates dest unless ``reset`` - is set. 
- - Returns the hash of the rebased head and a map of all PR commits (to the PR they were rebased to) - """ - logger = _logger.getChild('rebase') - original_head = self.head(dest) - if commits is None: - commits = self.commits(pr) - - logger.debug("rebasing %s, %s on %s (reset=%s, commits=%s)", - self._repo, pr, dest, reset, len(commits)) - - assert commits, "can't rebase a PR with no commits" - prev = original_head - for original in commits: - assert len(original['parents']) == 1, "can't rebase commits with more than one parent" - tmp_msg = 'temp rebasing PR %s (%s)' % (pr, original['sha']) - merged = self.merge(original['sha'], dest, tmp_msg) - - # whichever parent is not original['sha'] should be what dest - # deref'd to, and we want to check that matches the "left parent" we - # expect (either original_head or the previously merged commit) - [base_commit] = (parent['sha'] for parent in merged['parents'] - if parent['sha'] != original['sha']) - assert prev == base_commit,\ - "Inconsistent view of %s between head (%s) and merge (%s)" % ( - dest, prev, base_commit, - ) - prev = merged['sha'] - original['new_tree'] = merged['tree']['sha'] - - prev = original_head - mapping = {} - for c in commits: - committer = c['commit']['committer'] - committer.pop('date') - copy = self('post', 'git/commits', json={ - 'message': c['commit']['message'], - 'tree': c['new_tree'], - 'parents': [prev], - 'author': c['commit']['author'], - 'committer': committer, - }, check={409: MergeError}).json() - logger.debug('copied %s to %s (parent: %s)', c['sha'], copy['sha'], prev) - prev = mapping[c['sha']] = copy['sha'] - - if reset: - self.set_ref(dest, original_head) - else: - self.set_ref(dest, prev) - - logger.debug('rebased %s, %s on %s (reset=%s, commits=%s) -> %s', - self._repo, pr, dest, reset, len(commits), - prev) - # prev is updated after each copy so it's the rebased PR head - return prev, mapping - # fetch various bits of issues / prs to load them def pr(self, number): return ( @@ -361,14 +338,14 @@ class GH(object): if not r.links.get('next'): return - def commits_lazy(self, pr): + def commits_lazy(self, pr: int) -> Iterable[PrCommit]: for page in itertools.count(1): - r = self('get', 'pulls/{}/commits'.format(pr), params={'page': page}) + r = self('get', f'pulls/{pr}/commits', params={'page': page}) yield from r.json() if not r.links.get('next'): return - def commits(self, pr): + def commits(self, pr: int) -> List[PrCommit]: """ Returns a PR's commits oldest first (that's what GH does & is what we want) """ diff --git a/runbot_merge/migrations/13.0.1.6/pre-migration.py b/runbot_merge/migrations/13.0.1.6/pre-migration.py index a7903018..639c5013 100644 --- a/runbot_merge/migrations/13.0.1.6/pre-migration.py +++ b/runbot_merge/migrations/13.0.1.6/pre-migration.py @@ -1,6 +1,3 @@ -import collections - - def migrate(cr, version): """ Status overrides: o2m -> m2m """ diff --git a/runbot_merge/migrations/15.0.1.10/pre-migration.py b/runbot_merge/migrations/15.0.1.10/pre-migration.py new file mode 100644 index 00000000..a9ca29d3 --- /dev/null +++ b/runbot_merge/migrations/15.0.1.10/pre-migration.py @@ -0,0 +1,11 @@ +""" Migration for the unified commands parser, fp_github fields moved from +forwardport to mergebot (one of them is removed but we might not care) +""" +def migrate(cr, version): + cr.execute(""" + UPDATE ir_model_data + SET module = 'runbot_merge' + WHERE module = 'forwardport' + AND model = 'ir.model.fields' + AND name in ('fp_github_token', 'fp_github_name') + """) diff --git 
a/runbot_merge/migrations/15.0.1.11/pre-migration.py b/runbot_merge/migrations/15.0.1.11/pre-migration.py new file mode 100644 index 00000000..521e148b --- /dev/null +++ b/runbot_merge/migrations/15.0.1.11/pre-migration.py @@ -0,0 +1,124 @@ +def move_fields(cr, *names): + cr.execute(""" + UPDATE ir_model_data + SET module = 'runbot_merge' + WHERE module = 'forwardport' + AND model = 'runbot_merge_pull_requests' + AND name IN %s + """, [names]) + +def migrate(cr, version): + # cleanup some old crap + cr.execute(""" + ALTER TABLE runbot_merge_project_freeze + DROP COLUMN IF EXISTS release_label, + DROP COLUMN IF EXISTS bump_label + """) + + # fw constraint moved to mergebot, alongside all the fields it constrains + cr.execute(""" + UPDATE ir_model_data + SET module = 'runbot_merge' + WHERE module = 'forwardport' + AND model = 'ir.model.constraint' + AND name = 'constraint_runbot_merge_pull_requests_fw_constraint' + """) + move_fields( + cr, 'merge_date', 'refname', + 'limit_id', 'source_id', 'parent_id', 'root_id', 'forwardport_ids', + 'detach_reason', 'fw_policy') + + # view depends on pr.state, which prevents changing the state column's type + # we can just drop the view and it'll be recreated by the db update + cr.execute("DROP VIEW runbot_merge_freeze_labels") + # convert a few data types + cr.execute(""" + CREATE TYPE runbot_merge_pull_requests_priority_type + AS ENUM ('default', 'priority', 'alone'); + + CREATE TYPE runbot_merge_pull_requests_state_type + AS ENUM ('opened', 'closed', 'validated', 'approved', 'ready', 'merged', 'error'); + + CREATE TYPE runbot_merge_pull_requests_merge_method_type + AS ENUM ('merge', 'rebase-merge', 'rebase-ff', 'squash'); + + CREATE TYPE runbot_merge_pull_requests_status_type + AS ENUM ('pending', 'failure', 'success'); + + + ALTER TABLE runbot_merge_pull_requests + ALTER COLUMN priority + TYPE runbot_merge_pull_requests_priority_type + USING CASE WHEN priority = 0 + THEN 'alone' + ELSE 'default' + END::runbot_merge_pull_requests_priority_type, + ALTER COLUMN state + TYPE runbot_merge_pull_requests_state_type + USING state::runbot_merge_pull_requests_state_type, + ALTER COLUMN merge_method + TYPE runbot_merge_pull_requests_merge_method_type + USING merge_method::runbot_merge_pull_requests_merge_method_type; + """) + + cr.execute(""" + ALTER TABLE runbot_merge_pull_requests + ADD COLUMN closed boolean not null default 'false', + ADD COLUMN error boolean not null default 'false', + ADD COLUMN skipchecks boolean not null default 'false', + ADD COLUMN cancel_staging boolean not null default 'false', + + ADD COLUMN statuses text not null default '{}', + ADD COLUMN statuses_full text not null default '{}', + ADD COLUMN status runbot_merge_pull_requests_status_type not null default 'pending' + """) + # first pass: update all the new unconditional (or simple) fields + cr.execute(""" + UPDATE runbot_merge_pull_requests p + SET closed = state = 'closed', + error = state = 'error', + skipchecks = priority = 'alone', + cancel_staging = priority = 'alone', + fw_policy = CASE fw_policy WHEN 'ci' THEN 'default' ELSE fw_policy END, + reviewed_by = CASE state + -- old version did not reset reviewer on PR update + WHEN 'opened' THEN NULL + WHEN 'validated' THEN NULL + -- if a PR predates the reviewed_by field, assign odoobot as reviewer + WHEN 'merged' THEN coalesce(reviewed_by, 2) + ELSE reviewed_by + END, + status = CASE state + WHEN 'validated' THEN 'success' + WHEN 'ready' THEN 'success' + WHEN 'merged' THEN 'success' + ELSE 'pending' + 
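-- remaining states (opened, approved, closed, error) fall back to pending
+                   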
END::runbot_merge_pull_requests_status_type
+    """)
+
+    # the rest only gets updated if we have a matching commit which is not
+    # always the case
+    cr.execute("""
+        CREATE TEMPORARY TABLE parents ( id INTEGER not null, overrides jsonb not null );
+        WITH RECURSIVE parent_chain AS (
+            SELECT id, overrides::jsonb
+            FROM runbot_merge_pull_requests
+            WHERE parent_id IS NULL
+            UNION ALL
+            SELECT p.id, coalesce(pc.overrides || p.overrides::jsonb, pc.overrides, p.overrides::jsonb) as overrides
+            FROM runbot_merge_pull_requests p
+            JOIN parent_chain pc ON p.parent_id = pc.id
+        )
+        INSERT INTO parents SELECT * FROM parent_chain;
+        CREATE INDEX ON parents (id);
+
+        UPDATE runbot_merge_pull_requests p
+        SET statuses = jsonb_pretty(c.statuses::jsonb)::text,
+            statuses_full = jsonb_pretty(
+                c.statuses::jsonb
+                || coalesce((select overrides from parents where id = p.parent_id), '{}')
+                || overrides::jsonb
+            )::text
+        FROM runbot_merge_commit c
+        WHERE p.head = c.sha
+    """)
diff --git a/runbot_merge/migrations/15.0.1.12/pre-migration.py b/runbot_merge/migrations/15.0.1.12/pre-migration.py
new file mode 100644
index 00000000..efcc27af
--- /dev/null
+++ b/runbot_merge/migrations/15.0.1.12/pre-migration.py
@@ -0,0 +1,833 @@
+"""This is definitely the giantest of fucks as pretty much the entire model was
+reworked
+"""
+import dataclasses
+import logging
+from collections import defaultdict
+from itertools import chain
+from typing import TypeVar, Any
+
+from psycopg2.extras import execute_batch, execute_values
+from psycopg2.sql import SQL
+
+logger = logging.getLogger("odoo.modules.migration.runbot_merge.15.0.1.12")
+
+def cleanup(cr):
+    """There seems to be some *pretty* weird database state having crept in
+    """
+    # Until 2021 (not sure why exactly) a bunch of batches were created with no
+    # PRs, some staged and some not.
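+    # such batches carry no information, so the migration simply drops them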
+    logger.info("Delete batches without PRs...")
+    cr.execute("""
+        DELETE FROM runbot_merge_batch
+        WHERE id IN (
+            SELECT b.id
+            FROM runbot_merge_batch b
+            LEFT JOIN runbot_merge_batch_runbot_merge_pull_requests_rel r ON (b.id = r.runbot_merge_batch_id)
+            WHERE r.runbot_merge_batch_id IS NULL
+        )
+    """)
+    # some of the batches above were the only ones of their stagings
+    logger.info("Delete stagings without batches...")
+    cr.execute("""
+        DELETE FROM runbot_merge_stagings
+        WHERE id IN (
+            SELECT s.id
+            FROM runbot_merge_stagings s
+            LEFT JOIN runbot_merge_batch b ON (s.id = b.staging_id)
+            WHERE b.id IS NULL
+        )
+    """)
+
+    # check PRs whose source has a source
+    cr.execute("""
+        SELECT
+            p.id AS id,
+            s.id AS source_id,
+            r.name || '#' || p.number AS pr,
+            pr.name || '#' || pp.number AS parent,
+            sr.name || '#' || s.number AS source
+
+        FROM runbot_merge_pull_requests p
+        JOIN runbot_merge_repository r ON (r.id = p.repository)
+
+        JOIN runbot_merge_pull_requests pp ON (pp.id = p.source_id)
+        JOIN runbot_merge_repository pr ON (pr.id = pp.repository)
+
+        JOIN runbot_merge_pull_requests s ON (s.id = pp.source_id)
+        JOIN runbot_merge_repository sr ON (sr.id = s.repository)
+        ORDER BY p.id;
+    """)
+    for pid, ssid, _, _, _ in cr.fetchall():
+        cr.execute("UPDATE runbot_merge_pull_requests SET source_id = %s WHERE id = %s", [ssid, pid])
+
+def hlink(url):
+    """A terminal hlink starts with OSC8;{params};{link}ST and ends with the
+    sequence with no params or link
+    """
+    return f'\x9d8;;{url}\x9c'
+
+def link(label, url):
+    return f"{hlink(url)}{label}{hlink('')}"
+
+
+def batch_freezes(cr):
+    """Old freezes were created batch-less but marked as merged, to make things
+    more consistent and avoid losing them for e.g. synthetic git histories,
+    associate them with synthetic successful stagings
+    """
+    cr.execute("SELECT id FROM res_users WHERE login = 'moc@odoo.com'")
+    [uid] = cr.fetchone()
+    cr.execute("""
+        SELECT
+            array_agg(DISTINCT p.target) AS target,
+            array_agg(DISTINCT p.merge_date) AS merge_date,
+            json_object_agg(r.id, json_build_object(
+                'id', p.id,
+                'head', p.commits_map::json->''
+            )) AS prs
+
+        FROM runbot_merge_pull_requests p
+        JOIN runbot_merge_repository r ON (r.id = p.repository)
+        JOIN runbot_merge_branch t ON (t.id = p.target)
+
+        LEFT JOIN runbot_merge_batch_runbot_merge_pull_requests_rel bp ON (runbot_merge_pull_requests_id = p.id)
+        LEFT JOIN runbot_merge_batch b ON (runbot_merge_batch_id = b.id)
+        LEFT JOIN runbot_merge_stagings s ON (b.staging_id = s.id)
+
+        WHERE p.state = 'merged'
+          AND runbot_merge_pull_requests_id IS NULL
+          AND p.id != 1
+
+        GROUP BY label;
+    """)
+    freeze_batches = [
+        (target, merge_date, {int(r): p for r, p in prs.items()})
+        for [target], [merge_date], prs in cr._obj
+    ]
+
+    stagings = []
+    for t, m, prs in freeze_batches:
+        # fetch the preceding successful staging on master
+        cr.execute("""
+            SELECT id
+            FROM runbot_merge_stagings
+            -- target 1 = master (so we want the last successful master staging before the freeze)
+            WHERE state = 'success' AND staged_at < %s AND target = 1
+            ORDER BY staged_at DESC
+            LIMIT 1
+        """, [m])
+        cr.execute("""
+            SELECT repository_id, commit_id
+            FROM runbot_merge_stagings_commits
+            WHERE staging_id = %s
+        """, cr.fetchone())
+        commits = dict(cr._obj)
+
+        cr.execute("""
+            INSERT INTO runbot_merge_stagings
+                (state, active, create_uid, write_uid, target, staged_at, create_date, write_date)
+            VALUES ('success', false, %s, %s, %s, %s, %s, %s)
+            RETURNING id
+        """, [uid, uid, t, m, m, m])
+        [[staging]] = 
cr.fetchall()
+        stagings.append(staging)
+
+        for repo, pr in prs.items():
+            if repo not in commits:
+                cr.execute("""
+                    INSERT INTO runbot_merge_commit (sha) VALUES (%s)
+                    ON CONFLICT (sha) DO UPDATE
+                        SET to_check = runbot_merge_commit.to_check
+                    RETURNING id
+                """, [pr['head']])
+                [cid] = cr.fetchone()
+                commits[repo] = cid
+
+        for repo, commit in commits.items():
+            cr.execute("""
+                INSERT INTO runbot_merge_stagings_commits
+                    (staging_id, repository_id, commit_id)
+                VALUES (%s, %s, %s)
+            """, [staging, repo, commit])
+            cr.execute("""
+                INSERT INTO runbot_merge_stagings_heads
+                    (staging_id, repository_id, commit_id)
+                VALUES (%s, %s, %s)
+            """, [staging, repo, commit])
+
+    batches = []
+    for staging, (_, date, _) in zip(stagings, freeze_batches):
+        cr.execute("""
+            INSERT INTO runbot_merge_batch
+                (create_uid, write_uid, staging_id, create_date, write_date)
+            VALUES (%s, %s, %s, %s, %s)
+            RETURNING id
+        """, [uid, uid, staging, date, date])
+        [[batch]] = cr.fetchall()
+        batches.append(batch)
+
+    for batch, (_, _, prs) in zip(batches, freeze_batches):
+        for pr in prs.values():
+            cr.execute("""
+                INSERT INTO runbot_merge_batch_runbot_merge_pull_requests_rel
+                    (runbot_merge_batch_id, runbot_merge_pull_requests_id)
+                VALUES (%s, %s)
+            """, [batch, pr['id']])
+
+
+def migrate(cr, version):
+    cr.execute("select from forwardport_batches")
+    assert not cr.rowcount, f"can't migrate the mergebot with enqueued forward ports (found {cr.rowcount})"
+    # avoid SQL taking absolutely ungodly amounts of time
+    cr.execute("SET statement_timeout = '60s'")
+    # will be recreated & computed on the fly
+    cr.execute("""
+        ALTER TABLE runbot_merge_batch
+            DROP COLUMN target,
+            DROP COLUMN active
+    """)
+
+    cleanup(cr)
+    batch_freezes(cr)
+
+    cr.execute("""
+        SELECT
+            source_name,
+            array_agg(json_build_array(gs.target, gs.prs) order by gs.seq desc)
+        FROM (
+            SELECT
+                rr.name || '#' || source.number as source_name,
+                t.sequence as seq,
+                t.name as target,
+                array_agg(json_build_array(r.name || '#' || p.number, p.state)) as prs
+
+            FROM runbot_merge_pull_requests p
+            JOIN runbot_merge_repository r ON (r.id = p.repository)
+            JOIN runbot_merge_branch t ON (t.id = p.target)
+
+            JOIN runbot_merge_pull_requests source ON (source.id = p.source_id)
+            JOIN runbot_merge_repository rr ON (rr.id = source.repository)
+
+            GROUP BY source.id, rr.id, t.id
+            HAVING count(*) FILTER (WHERE p.state = 'merged') > 1
+        ) gs
+        GROUP BY source_name
+    """)
+    if cr.rowcount:
+        msg = "Found inconsistent batches, which will confuse later chaining\n\n"
+        for source, per_target in cr._obj:
+            msg += f"source {source}\n"
+            for target, prs in per_target:
+                msg += "\t{} {}\n".format(
+                    target,
+                    ", ".join(f'{p} ({s})' for p, s in prs),
+                )
+        raise Exception(msg)
+
+    logger.info("add batch columns...")
+    cr.execute("""
+        CREATE TYPE runbot_merge_batch_priority
+            AS ENUM ('default', 'priority', 'alone');
+
+        ALTER TABLE runbot_merge_batch
+            -- backfilled from staging
+            ADD COLUMN merge_date timestamp,
+            -- backfilled from PRs
+            ADD COLUMN priority runbot_merge_batch_priority NOT NULL DEFAULT 'default',
+            ADD COLUMN skipchecks boolean NOT NULL DEFAULT false,
+            ADD COLUMN cancel_staging boolean NOT NULL DEFAULT false,
+            ADD COLUMN fw_policy varchar NOT NULL DEFAULT 'default'
+        ;
+    """)
+    # batches not linked to stagings are likely to be useless
+    logger.info("add batch/staging join table...")
+    cr.execute("""
+        CREATE TABLE runbot_merge_staging_batch (
+            id serial PRIMARY KEY,
+            runbot_merge_batch_id integer NOT NULL REFERENCES runbot_merge_batch(id) ON DELETE 
CASCADE, + runbot_merge_stagings_id integer NOT NULL REFERENCES runbot_merge_stagings(id) ON DELETE CASCADE + ); + CREATE UNIQUE INDEX runbot_merge_staging_batch_idx ON runbot_merge_staging_batch + (runbot_merge_stagings_id, runbot_merge_batch_id); + CREATE INDEX runbot_merge_staging_batch_rev ON runbot_merge_staging_batch + (runbot_merge_batch_id) INCLUDE (runbot_merge_stagings_id); + """) + # old 'bot creates a new batch at staging time, associated with that + # specific staging, the way to recoup them (to the best of our ability) is + # to assume a new style batch is a set of PRs, so if we group batches by prs + # we get more or less the set of relevant batches / stagings + logger.info("collect batches...") + clusters, to_batch = collate_real_batches(cr) + + logger.info("collate batches...") + to_delete = [] + batch_staging_links = [] + to_rejoin = [] + for cluster in clusters.clusters: + first = cluster.merged_batch or min(cluster.batches) + to_delete.extend(cluster.batches - {first}) + # link all the PRs back to that batch + to_rejoin.append((first, list(cluster.prs))) + # link `first` to `staging`, ordering insertions by `batch` in order + # to conserve batching order + batch_staging_links.extend( + (batch, first, staging) + for batch, staging in cluster.stagings + ) + + logger.info("link batches to stagings...") + # sort (unique_batch, staging) by initial batch so that we create the new + # bits in the correct order hopefully + batch_staging_links.sort() + execute_values( + cr._obj, + "INSERT INTO runbot_merge_staging_batch (runbot_merge_batch_id, runbot_merge_stagings_id) VALUES %s", + ((b, s) for _, b, s in batch_staging_links), + page_size=1000, + ) + + logger.info("detach PRs from \"active\" batches...") + # there are non-deactivated batches floating around, which are not linked + # to stagings, they seem linked to updates (forward-ported PRs getting + # updated), but not exclusively + cr.execute("UPDATE runbot_merge_pull_requests SET batch_id = NULL WHERE batch_id IS NOT NULL") + # drop constraint because pg checks it even though we've set all the active batches to null + cr.execute("ALTER TABLE runbot_merge_pull_requests DROP CONSTRAINT runbot_merge_pull_requests_batch_id_fkey") + + while to_delete: + ds, to_delete = to_delete[:10000], to_delete[10000:] + logger.info("delete %d leftover batches", len(ds)) + cr.execute("DELETE FROM runbot_merge_batch WHERE id = any(%s)", [ds]) + + logger.info("delete staging column...") + cr.execute("ALTER TABLE runbot_merge_batch DROP COLUMN staging_id;") + + logger.info("relink PRs...") + cr.execute("DROP TABLE runbot_merge_batch_runbot_merge_pull_requests_rel") + execute_batch( + cr._obj, + "UPDATE runbot_merge_pull_requests SET batch_id = %s WHERE id = any(%s)", + to_rejoin, + page_size=1000, + ) + + # at this point all the surviving batches should have associated PRs + cr.execute(""" + SELECT b.id + FROM runbot_merge_batch b + LEFT JOIN runbot_merge_pull_requests p ON p.batch_id = b.id + WHERE p IS NULL; + """) + if cr.rowcount: + logger.error( + "All batches should have at least one PR, found %d without", + cr.rowcount, + ) + + # the relinked batches are those from stagings, but that means merged PRs + # (or at least PRs we tried to merge), we also need batches for non-closed + # non-merged PRs + logger.info("collect unbatched PRs...") + cr.execute(""" + SELECT + CASE + WHEN label SIMILAR TO '%%:patch-[[:digit:]]+' + THEN id::text + ELSE label + END as label_but_not, + array_agg(id), + array_agg(distinct target) + FROM 
runbot_merge_pull_requests + WHERE batch_id IS NULL AND id != all(%s) + GROUP BY label_but_not + """, [[pid for b in to_batch for pid in b]]) + for _label, ids, targets in cr._obj: + # a few batches are nonsensical e.g. multiple PRs on different + # targets from the same branch or mix of master upgrade and stable + # branch community, split them out + if len(targets) > 1: + to_batch.extend([id] for id in ids) + else: + to_batch.append(ids) + + logger.info("create %d new batches for unbatched prs...", len(to_batch)) + cr.execute( + SQL("INSERT INTO runbot_merge_batch VALUES {} RETURNING id").format( + SQL(", ").join([SQL("(DEFAULT)")]*len(to_batch)))) + logger.info("link unbatched PRs to batches...") + execute_batch( + cr._obj, + "UPDATE runbot_merge_pull_requests SET batch_id = %s WHERE id = any(%s)", + [(batch_id, ids) for ids, [batch_id] in zip(to_batch, cr.fetchall())], + page_size=1000, + ) + + cr.execute("SELECT state, count(*) FROM runbot_merge_pull_requests WHERE batch_id IS NULL GROUP BY state") + if cr.rowcount: + prs = cr.fetchall() + logger.error( + "Found %d PRs without a batch:%s", + sum(c for _, c in prs), + "".join( + f"\n\t- {c} {p!r} PRs" + for p, c in prs + ), + ) + + logger.info("move pr data to batches...") + cr.execute(""" + UPDATE runbot_merge_batch b + SET merge_date = v.merge_date, + priority = v.p::varchar::runbot_merge_batch_priority, + skipchecks = v.skipchecks, + cancel_staging = v.cancel_staging, + fw_policy = case when v.skipci + THEN 'skipci' + ELSE 'default' + END + FROM ( + SELECT + batch_id as id, + max(priority) as p, + min(merge_date) as merge_date, + -- added to PRs in 1.11 so can be aggregated & copied over + bool_or(skipchecks) as skipchecks, + bool_or(cancel_staging) as cancel_staging, + bool_or(fw_policy = 'skipci') as skipci + FROM runbot_merge_pull_requests + GROUP BY batch_id + ) v + WHERE b.id = v.id + """) + + logger.info("restore batch constraint...") + cr.execute(""" + ALTER TABLE runbot_merge_pull_requests + ADD CONSTRAINT runbot_merge_pull_requests_batch_id_fkey + FOREIGN KEY (batch_id) + REFERENCES runbot_merge_batch (id) + """) + + # remove xid for x_prs (not sure why it exists) + cr.execute(""" + DELETE FROM ir_model_data + WHERE module = 'forwardport' + AND name = 'field_forwardport_batches__x_prs' + """) + # update (x_)prs to match the updated field type(s) + cr.execute(""" + UPDATE ir_model_fields + SET ttype = 'one2many', + relation = 'runbot_merge.pull_requests', + relation_field = 'batch_id' + WHERE model_id = 445 AND name = 'prs'; + + UPDATE ir_model_fields + SET ttype = 'one2many' + WHERE model_id = 448 AND name = 'x_prs'; + """) + + logger.info("generate batch parenting...") + cr.execute("SELECT id, project_id, name FROM runbot_merge_branch ORDER BY project_id, sequence, name") + # branch_id -> str + branch_names = {} + # branch_id -> project_id + projects = {} + # project_id -> list[branch_id] + branches_for_project = {} + for bid, pid, name in cr._obj: + branch_names[bid] = name + projects[bid] = pid + branches_for_project.setdefault(pid, []).append(bid) + cr.execute(""" + SELECT batch_id, + array_agg(distinct target), + array_agg(json_build_object( + 'id', p.id, + 'name', r.name || '#' || number, + 'repo', r.name, + 'number', number, + 'state', p.state, + 'source', source_id + )) + FROM runbot_merge_pull_requests p + JOIN runbot_merge_repository r ON (r.id = p.repository) + GROUP BY batch_id + """) + todos = [] + descendants = defaultdict(list) + targets = {} + batches = {} + batch_prs = {} + for batch, target_ids, prs in 
cr._obj: + assert len(target_ids) == 1, \ + "Found batch with multiple targets {tnames} {prs}".format( + tnames=', '.join(branch_names[id] for id in target_ids), + prs=prs, + ) + + todos.append((batch, target_ids[0], prs)) + batch_prs[batch] = prs + for pr in prs: + pr['link'] = link(pr['name'], "https://mergebot.odoo.com/{repo}/pull/{number}".format_map(pr)) + + targets[pr['id']] = target_ids[0] + batches[pr['id']] = batch + batches[pr['name']] = batch + if pr['source']: + descendants[pr['source']].append(pr['id']) + else: + # put source PRs as their own descendants otherwise the linkage + # fails when trying to find the top-most parent + descendants[pr['id']].append(pr['id']) + assert None not in descendants + + for prs in chain( + KNOWN_BATCHES, + chain.from_iterable(WEIRD_SEQUENCES), + ): + batch_of_prs = {batches[f'odoo/{p}'] for p in prs} + assert len(batch_of_prs) == 1,\ + "assumed {prs} were the same batch, got {batch_of_prs}".format( + prs=', '.join(prs), + batch_of_prs='; '.join( + '{} => {}'.format(p, batches[f'odoo/{p}']) + for p in prs + ) + ) + + prs_of_batch = {pr['name'].removeprefix('odoo/') for pr in batch_prs[batch_of_prs.pop()]} + assert set(prs) == prs_of_batch,\ + "assumed batch would contain {prs}, got {prs_of_batch}".format( + prs=', '.join(prs), + prs_of_batch=', '.join(prs_of_batch), + ) + + parenting = [] + for batch, target, prs in todos: + sources = [p['source'] for p in prs if p['source']] + # can't have parent batch without source PRs + if not sources: + continue + + pid = projects[target] + branches = branches_for_project[pid] + + # we need all the preceding targets in order to jump over disabled branches + previous_targets = branches[branches.index(target) + 1:] + if not previous_targets: + continue + + for previous_target in previous_targets: + # from each source, find the descendant targeting the earlier target, + # then get the batch of these PRs + parents = { + batches[descendant] + for source in sources + for descendant in descendants[source] + if targets[descendant] == previous_target + } + if parents: + break + else: + continue + + if len(parents) == 2: + parents1, parents2 = [batch_prs[parent] for parent in parents] + # if all of one parent are merged and all of the other are not, take the merged side + if all(p['state'] == 'merged' for p in parents1) and all(p['state'] != 'merged' for p in parents2): + parents = [list(parents)[0]] + elif all(p['state'] != 'merged' for p in parents1) and all(p['state'] == 'merged' for p in parents2): + parents = [list(parents)[1]] + elif len(parents1) == 1 and len(parents2) == 1 and len(prs) == 1: + # if one of the candidates is older than the current PR + # (lower id) and the other one younger, assume the first one is + # correct + p = min(parents, key=lambda p: batch_prs[p][0]['id']) + low = batch_prs[p] + high = batch_prs[max(parents, key=lambda p: batch_prs[p][0]['id'])] + if low[0]['id'] < prs[0]['id'] < high[0]['id']: + parents = [p] + + if real_parents := SAAS_135_INSERTION_CONFUSION.get(tuple(sorted(parents))): + parents = real_parents + + assert len(parents) == 1,\ + ("Found multiple candidates for batch {batch} ({prs})" + " with target {target} (previous={previous_target})\n\t{parents}".format( + parents="\n\t".join( + "{} ({})".format( + parent, + ", ".join( + f"{p['link']} ({p['state']}, {branch_names[targets[p['id']]]})" + for p in batch_prs[parent] + ) + ) + for parent in parents + ), + batch=batch, + target=branch_names[target], + previous_target=branch_names[previous_target], + prs=', 
'.join(map("{link} ({state})".format_map, prs)), + )) + parenting.append((parents.pop(), batch)) + + logger.info("set batch parenting...") + # add column down here otherwise the FK constraint has to be verified for + # each batch we try to delete and that is horrendously slow, deferring the + # constraints is not awesome because we need to check it at the first DDL + # and that's still way slower than feels necessary + cr.execute(""" + ALTER TABLE runbot_merge_batch + ADD COLUMN parent_id integer + REFERENCES runbot_merge_batch(id) + """) + execute_batch( + cr._obj, + "UPDATE runbot_merge_batch SET parent_id = %s WHERE id = %s", + parenting, + page_size=1000, + ) + +@dataclasses.dataclass(slots=True, kw_only=True) +class Cluster: + merged_batch: int | None = None + prs: set[int] = dataclasses.field(default_factory=set) + batches: set[int] = dataclasses.field(default_factory=set) + stagings: set[tuple[int, int]] = dataclasses.field(default_factory=set) + "set of original (batch, staging) pairs" + +@dataclasses.dataclass +class Clusters: + clusters: list[Cluster] = dataclasses.field(default_factory=list) + by_batch: dict[int, Cluster] = dataclasses.field(default_factory=dict) + by_pr: dict[int, Cluster] = dataclasses.field(default_factory=dict) + +@dataclasses.dataclass(slots=True, kw_only=True) +class Batch: + staging: int | None = None + merged: bool = False + prs: set[int] = dataclasses.field(default_factory=set) + +T = TypeVar('T') +def insert(s: set[T], v: T) -> bool: + """Inserts v in s if not in, and returns whether an insertion was needed. + """ + if v in s: + return False + else: + s.add(v) + return True +def collate_real_batches(cr: Any) -> tuple[Clusters, list[list[int]]]: + cr.execute(''' + SELECT + st.id as staging, + st.state as staging_state, + b.id as batch_id, + p.id as pr_id + FROM runbot_merge_batch_runbot_merge_pull_requests_rel br + JOIN runbot_merge_batch b ON (b.id = br.runbot_merge_batch_id) + JOIN runbot_merge_pull_requests as p ON (p.id = br.runbot_merge_pull_requests_id) + LEFT JOIN runbot_merge_stagings st ON (st.id = b.staging_id) + ''') + batch_map: dict[int, Batch] = {} + pr_to_batches = defaultdict(set) + for staging_id, staging_state, batch_id, pr_id in cr.fetchall(): + pr_to_batches[pr_id].add(batch_id) + + if batch := batch_map.get(batch_id): + batch.prs.add(pr_id) + else: + batch_map[batch_id] = Batch( + staging=staging_id, + merged=staging_state == 'success', + prs={pr_id}, + ) + + # maps a PR name to its id + cr.execute(""" + SELECT r.name || '#' || p.number, p.id + FROM runbot_merge_pull_requests p + JOIN runbot_merge_repository r ON (r.id = p.repository) + WHERE r.name || '#' || p.number = any(%s) + """, [[f'odoo/{p}' for seq in WEIRD_SEQUENCES for b in seq if len(b) > 1 for p in b]]) + prmap: dict[str, int] = dict(cr._obj) + to_batch = [] + # for each WEIRD_SEQUENCES batch, we need to merge their batches if any, + # and create them otherwise + for batch in (b for seq in WEIRD_SEQUENCES for b in seq if len(b) > 1): + ids = [prmap[f'odoo/{n}'] for n in batch] + batches = {b for pid in ids for b in pr_to_batches[pid]} + if batches: + for pid in ids: + pr_to_batches[pid].update(batches) + for bid in batches: + batch_map[bid].prs.update(ids) + else: + # need to create a new batch + to_batch.append(ids) + + clusters = Clusters() + # we can start from either the PR or the batch side to reconstruct a cluster + for pr_id in pr_to_batches: + if pr_id in clusters.by_pr: + continue + + to_visit = [pr_id] + prs: set[int] = set() + merged_batch = None + batches: 
set[int] = set() + stagings: set[tuple[int, int]] = set() + while to_visit: + pr_id = to_visit.pop() + if not insert(prs, pr_id): + continue + + for batch_id in pr_to_batches[pr_id]: + if not insert(batches, batch_id): + continue + + b = batch_map[batch_id] + if s := b.staging: + stagings.add((batch_id, s)) + if b.merged: + merged_batch = batch_id + to_visit.extend(b.prs - prs) + + c = Cluster(merged_batch=merged_batch, prs=prs, batches=batches, stagings=stagings) + clusters.clusters.append(c) + clusters.by_batch.update((batch_id, c) for batch_id in c.batches) + clusters.by_pr.update((pr_id, c) for pr_id in c.prs) + + return clusters, to_batch + +# at the creation of saas 13.5, the forwardbot clearly got very confused and +# somehow did not correctly link the PRs it reinserted together, leading to +# some of them being merged separately, leading the batch parenting linker thing +# to be extremely confused +SAAS_135_INSERTION_CONFUSION = { + (48200, 48237): [48237], + (48353, 48388): [48353], + (48571, 48602): [48602], + (73614, 73841): [73614], +} + +KNOWN_BATCHES = [ + # both closed, same source (should be trivial) + ["odoo#151827", "enterprise#55453"], + ["odoo#66743", "enterprise#16631"], + + # both closed but different sources + ["odoo#57659", "enterprise#13204"], + ["odoo#57752", "enterprise#13238"], + ["odoo#94152", "enterprise#28664"], + ["odoo#114059", "enterprise#37690"], + ["odoo#152904", "enterprise#55975"], + + # one closed the other not, different sources (so a PR was added in the + # middle of a forward port then its descendant was closed even though the + # other repo / sequence kept on keeping) + ["odoo#113422", "enterprise#37429"], + ["odoo#151992", "enterprise#55501"], + ["odoo#159211", "enterprise#59407"], + + # closed without a sibling but their source had a sibling + ["odoo#67727"], # enterprise closed at enterprise#16631 + ["odoo#70828"], # enterprise closed at enterprise#17901 + ["odoo#132817"], # enterprise closed at enterprise#44656 + ["odoo#137855"], # enterprise closed at enterprise#48092 + ["enterprise#49430"], # odoo closed at odoo#139515 + + ["odoo#109811", "enterprise#35966"], + ["odoo#110311", "enterprise#35983"], + ["odoo#110576"], +] + +# This is next level weird compared to the previous so it gets extra care: +# these are sequences with multiple points of divergence or grafting +WEIRD_SEQUENCES = [ + [ + ["odoo#40466"], + ["odoo#40607"], + ["odoo#40613", "odoo#41106"], + ["odoo#40615", "odoo#41112"], + ["odoo#40627", "odoo#41116", "odoo#41163"], + ["odoo#40638", "odoo#41119", "odoo#41165"], + ], + [ + ["odoo#46405"], + ["odoo#46698"], + ["odoo#46820"], + ["odoo#46974"], + ["odoo#47273"], + ["odoo#47345", "enterprise#9259"], + ["odoo#47349", "odoo#47724", "enterprise#9274"], + ], + [ + ["odoo#47923"], + ["odoo#47986"], + ["odoo#47991", "odoo#48010"], + ["odoo#47996", "odoo#48015", "odoo#48016"], + ["odoo#48003"], + ], + [ + ["enterprise#9996"], + ["enterprise#10062", "odoo#49828"], + ["enterprise#10065", "odoo#49852", "enterprise#10076"], + ["enterprise#10173", "odoo#50087"], + ["enterprise#10179", "odoo#50104"], + ["enterprise#10181", "odoo#50110"], + ], + [ + ["enterprise#16357"], + ["enterprise#16371"], + ["enterprise#16375", "enterprise#16381"], + ["enterprise#16378", "enterprise#16385"], + ["enterprise#16379", "enterprise#16390"], + ], + [ + ["odoo#55112"], + ["odoo#55120"], + ["odoo#55123", "odoo#55159"], + ["odoo#55128", "odoo#55169"], + ["odoo#55135", "odoo#55171"], + ["odoo#55140", "odoo#55172"], + ], + [ + ["odoo#56254", "enterprise#12558"], + 
["odoo#56294", "enterprise#12564"], + ["odoo#56300", "enterprise#12566"], + ["odoo#56340", "enterprise#12589", "enterprise#12604"], + ["odoo#56391", "enterprise#12608"], + ], + [ + ["enterprise#12565", "odoo#56299"], + ["enterprise#12572", "odoo#56309", "odoo#56494"], + ["enterprise#12660", "odoo#56518"], + ["enterprise#12688", "odoo#56581"], + ["enterprise#12691"], + ], + [ + ["odoo#64706"], + ["odoo#65275"], + ["odoo#65279", "odoo#65405"], + ["odoo#65489", "odoo#65491"], + ], + [ + ["odoo#66176"], + ["odoo#66188"], + ["odoo#66191"], + ["odoo#66194", "odoo#66226"], + ["odoo#66200", "odoo#66229", "odoo#66277"], + ["odoo#66204", "odoo#66232", "odoo#66283"], + ["odoo#66208", "odoo#66234", "odoo#66285", "odoo#66303"], + ], + [ + ["enterprise#22089", "odoo#79348"], + ["enterprise#26736", "odoo#90050"], + ["enterprise#31822", "odoo#101218", "odoo#106002"], + ["enterprise#36014", "odoo#110369", "odoo#113892"], + ["enterprise#37690", "odoo#114059"], + ], +] diff --git a/runbot_merge/migrations/15.0.1.13/pre-migration.py b/runbot_merge/migrations/15.0.1.13/pre-migration.py new file mode 100644 index 00000000..ed9ba615 --- /dev/null +++ b/runbot_merge/migrations/15.0.1.13/pre-migration.py @@ -0,0 +1,4 @@ +def migrate(cr, version): + cr.execute("ALTER TABLE runbot_merge_stagings " + "ADD COLUMN staging_end timestamp without time zone") + cr.execute("UPDATE runbot_merge_stagings SET staging_end = write_date") diff --git a/runbot_merge/migrations/15.0.1.14/pre-migration.py b/runbot_merge/migrations/15.0.1.14/pre-migration.py new file mode 100644 index 00000000..6ac9ed1f --- /dev/null +++ b/runbot_merge/migrations/15.0.1.14/pre-migration.py @@ -0,0 +1,12 @@ +def migrate(cr, version): + cr.execute(""" + CREATE TABLE runbot_merge_events_sources ( + id serial primary key, + repository varchar not null, + secret varchar + ); + INSERT INTO runbot_merge_events_sources (repository, secret) + SELECT r.name, p.secret + FROM runbot_merge_repository r + JOIN runbot_merge_project p ON p.id = r.project_id; + """) diff --git a/runbot_merge/migrations/15.0.1.15/pre-migration.py b/runbot_merge/migrations/15.0.1.15/pre-migration.py new file mode 100644 index 00000000..531811dd --- /dev/null +++ b/runbot_merge/migrations/15.0.1.15/pre-migration.py @@ -0,0 +1,22 @@ +"""Completely missed that in 44084e303ccece3cb54128ab29eab399bd4d24e9 I +completely changed the semantics and structure of the statuses_cache, so the +old caches don't actually work anymore at all. + +This rewrites all existing caches. 
+""" +def migrate(cr, version): + cr.execute(""" +WITH statuses AS ( + SELECT + s.id as staging_id, + json_object_agg(c.sha, c.statuses::json) as statuses + FROM runbot_merge_stagings s + LEFT JOIN runbot_merge_stagings_heads h ON (h.staging_id = s.id) + LEFT JOIN runbot_merge_commit c ON (h.commit_id = c.id) + GROUP BY s.id +) +UPDATE runbot_merge_stagings +SET statuses_cache = statuses +FROM statuses +WHERE id = staging_id + """) diff --git a/runbot_merge/migrations/15.0.1.8/pre-migration.py b/runbot_merge/migrations/15.0.1.8/pre-migration.py new file mode 100644 index 00000000..8bf72c44 --- /dev/null +++ b/runbot_merge/migrations/15.0.1.8/pre-migration.py @@ -0,0 +1,6 @@ +from pathlib import Path + +def migrate(cr, version): + sql = Path(__file__).parent.joinpath('upgrade.sql')\ + .read_text(encoding='utf-8') + cr.execute(sql) diff --git a/runbot_merge/migrations/15.0.1.8/upgrade.sql b/runbot_merge/migrations/15.0.1.8/upgrade.sql new file mode 100644 index 00000000..d5dc0502 --- /dev/null +++ b/runbot_merge/migrations/15.0.1.8/upgrade.sql @@ -0,0 +1,62 @@ +CREATE TABLE runbot_merge_stagings_commits ( + id serial NOT NULL, + staging_id integer not null references runbot_merge_stagings (id), + commit_id integer not null references runbot_merge_commit (id), + repository_id integer not null references runbot_merge_repository (id) +); + +CREATE TABLE runbot_merge_stagings_heads ( + id serial NOT NULL, + staging_id integer NOT NULL REFERENCES runbot_merge_stagings (id), + commit_id integer NOT NULL REFERENCES runbot_merge_commit (id), + repository_id integer NOT NULL REFERENCES runbot_merge_repository (id) +); + +-- some of the older stagings only have the head, not the commit, +-- add the commit +UPDATE runbot_merge_stagings + SET heads = heads::jsonb || jsonb_build_object( + 'odoo/odoo^', heads::json->'odoo/odoo', + 'odoo/enterprise^', heads::json->'odoo/enterprise' + ) + WHERE heads NOT ILIKE '%^%'; + +-- some of the stagings have heads which don't exist in the commits table, +-- because they never got a status from the runbot... 
+-- create fake commits so we don't lose heads +INSERT INTO runbot_merge_commit (sha, statuses, create_uid, create_date, write_uid, write_date) + SELECT r.value, '{}', s.create_uid, s.create_date, s.create_uid, s.create_date + FROM runbot_merge_stagings s, + json_each_text(s.heads::json) r +ON CONFLICT DO NOTHING; + +CREATE TEMPORARY TABLE staging_commits ( + id integer NOT NULL, + repo integer NOT NULL, + -- the staging head (may be a dedup, may be the same as commit) + head integer NOT NULL, + -- the staged commit + commit integer NOT NULL +); +-- the splatting works entirely off of the staged head +-- (the one without the ^ suffix), we concat the `^` to get the corresponding +-- merge head (the actual commit to push to the branch) +INSERT INTO staging_commits (id, repo, head, commit) + SELECT s.id, re.id AS repo, h.id AS head, c.id AS commit + FROM runbot_merge_stagings s, + json_each_text(s.heads::json) r, + runbot_merge_commit h, + runbot_merge_commit c, + runbot_merge_repository re + WHERE r.key NOT ILIKE '%^' + AND re.name = r.key + AND h.sha = r.value + AND c.sha = s.heads::json->>(r.key || '^'); + +INSERT INTO runbot_merge_stagings_heads (staging_id, repository_id, commit_id) +SELECT id, repo, head FROM staging_commits; + +INSERT INTO runbot_merge_stagings_commits (staging_id, repository_id, commit_id) +SELECT id, repo, commit FROM staging_commits; + +ALTER TABLE runbot_merge_stagings DROP COLUMN heads; diff --git a/runbot_merge/migrations/15.0.1.9/pre-migration.py b/runbot_merge/migrations/15.0.1.9/pre-migration.py new file mode 100644 index 00000000..76539967 --- /dev/null +++ b/runbot_merge/migrations/15.0.1.9/pre-migration.py @@ -0,0 +1,32 @@ +from psycopg2.extras import execute_values + + +def migrate(cr, version): + # Drop all legacy style "previous failures": this is for PRs + # several years old so almost certainly long irrelevant, and it + # allows removing the workaround for them. Legacy style has the + # `state`, `target`, `description` keys at the toplevel while new + # style is like commit statuses, with the contexts at the toplevel + # and the status info below. + cr.execute(""" +UPDATE runbot_merge_pull_requests + SET previous_failure = '{}' + WHERE previous_failure::jsonb ? 'state' +""") + + cr.execute(""" +WITH new_statuses (id, statuses) AS ( + SELECT id, json_object_agg( + key, + CASE WHEN jsonb_typeof(value) = 'string' + THEN jsonb_build_object('state', value, 'target_url', null, 'description', null) + ELSE value + END + ) AS statuses + FROM runbot_merge_commit + CROSS JOIN LATERAL jsonb_each(statuses::jsonb) s + WHERE jsonb_path_match(statuses::jsonb, '$.*.type() != "object"') + GROUP BY id +) +UPDATE runbot_merge_commit SET statuses = new_statuses.statuses FROM new_statuses WHERE runbot_merge_commit.id = new_statuses.id + """) diff --git a/runbot_merge/models/__init__.py b/runbot_merge/models/__init__.py index 5cf276c8..ac19320c 100644 --- a/runbot_merge/models/__init__.py +++ b/runbot_merge/models/__init__.py @@ -1,6 +1,11 @@ +from . import mail_thread from . import ir_actions from . import res_partner from . import project from . import pull_requests +from . import batch from . import project_freeze +from . import stagings_create from . import staging_cancel +from . import events_sources +from . 
import crons diff --git a/runbot_merge/models/batch.py b/runbot_merge/models/batch.py new file mode 100644 index 00000000..df52fc38 --- /dev/null +++ b/runbot_merge/models/batch.py @@ -0,0 +1,537 @@ +from __future__ import annotations + +import base64 +import contextlib +import logging +import os +import re +from collections import defaultdict +from collections.abc import Iterator + +import requests +from psycopg2 import sql + +from odoo import models, fields, api +from .utils import enum +from .. import git + +_logger = logging.getLogger(__name__) +FOOTER = '\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n' + + +class StagingBatch(models.Model): + _name = 'runbot_merge.staging.batch' + _description = "link between batches and staging in order to maintain an " \ + "ordering relationship between the batches of a staging" + _log_access = False + _order = 'id' + + runbot_merge_batch_id = fields.Many2one('runbot_merge.batch', required=True) + runbot_merge_stagings_id = fields.Many2one('runbot_merge.stagings', required=True) + + def init(self): + super().init() + + self.env.cr.execute(sql.SQL(""" + CREATE UNIQUE INDEX IF NOT EXISTS runbot_merge_staging_batch_idx + ON {table} (runbot_merge_stagings_id, runbot_merge_batch_id); + + CREATE INDEX IF NOT EXISTS runbot_merge_staging_batch_rev + ON {table} (runbot_merge_batch_id) INCLUDE (runbot_merge_stagings_id); + """).format(table=sql.Identifier(self._table))) + + +class Batch(models.Model): + """ A batch is a "horizontal" grouping of *codependent* PRs: PRs with + the same label & target but for different repositories. These are + assumed to be part of the same "change" smeared over multiple + repositories e.g. change an API in repo1, this breaks use of that API + in repo2 which now needs to be updated. 
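+
+    e.g. (hypothetical PR numbers) odoo/odoo#1 and odoo/enterprise#2 sharing
+    the same label and both targeting ``master`` would form a single batch.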
+ """ + _name = 'runbot_merge.batch' + _description = "batch of pull request" + _inherit = ['mail.thread'] + _parent_store = True + _order = "id desc" + + name = fields.Char(compute="_compute_name", search="_search_name") + target = fields.Many2one('runbot_merge.branch', store=True, compute='_compute_target') + batch_staging_ids = fields.One2many('runbot_merge.staging.batch', 'runbot_merge_batch_id') + staging_ids = fields.Many2many( + 'runbot_merge.stagings', + compute="_compute_staging_ids", + context={'active_test': False}, + ) + split_id = fields.Many2one('runbot_merge.split', index=True) + + all_prs = fields.One2many('runbot_merge.pull_requests', 'batch_id') + prs = fields.One2many('runbot_merge.pull_requests', compute='_compute_open_prs', search='_search_open_prs') + active = fields.Boolean(compute='_compute_active', store=True, help="closed batches (batches containing only closed PRs)") + + fw_policy = fields.Selection([ + ('no', "Do not port forward"), + ('default', "Default"), + ('skipci', "Skip CI"), + ], required=True, default="default", string="Forward Port Policy", tracking=True) + + merge_date = fields.Datetime(tracking=True) + # having skipchecks skip both validation *and approval* makes sense because + # it's batch-wise, having to approve individual PRs is annoying + skipchecks = fields.Boolean( + string="Skips Checks", + default=False, tracking=True, + help="Forces entire batch to be ready, skips validation and approval", + ) + cancel_staging = fields.Boolean( + string="Cancels Stagings", + default=False, tracking=True, + help="Cancels current staging on target branch when becoming ready" + ) + priority = fields.Selection([ + ('default', "Default"), + ('priority', "Priority"), + ('alone', "Alone"), + ], default='default', group_operator=None, required=True, tracking=True, + column_type=enum(_name, 'priority'), + ) + + blocked = fields.Char(store=True, compute="_compute_blocked") + + # unlike on PRs, this does not get detached... ? 
(because batches can be + # partially detached so that's a PR-level concern) + parent_path = fields.Char(index=True, unaccent=False) + parent_id = fields.Many2one("runbot_merge.batch") + genealogy_ids = fields.Many2many( + "runbot_merge.batch", + compute="_compute_genealogy", + context={"active_test": False}, + ) + + @api.depends('batch_staging_ids.runbot_merge_stagings_id') + def _compute_staging_ids(self): + for batch in self: + batch.staging_ids = batch.batch_staging_ids.runbot_merge_stagings_id + + @property + def source(self): + return self.browse(map(int, self.parent_path.split('/', 1)[:1])) + + def descendants(self, include_self: bool = False) -> Iterator[Batch]: + # in DB both will prefix-match on the literal prefix then apply a + # trivial filter (even though the filter is technically unnecessary for + # the first form), doing it like this means we don't have to `- self` + # in the ``not include_self`` case + if include_self: + pattern = self.parent_path + '%' + else: + pattern = self.parent_path + '_%' + + act = self.env.context.get('active_test', True) + return self\ + .with_context(active_test=False)\ + .search([("parent_path", '=like', pattern)], order="parent_path")\ + .with_context(active_test=act) + + # also depends on all the descendants of the source or sth + @api.depends('parent_path') + def _compute_genealogy(self): + for batch in self: + sid = next(iter(batch.parent_path.split('/', 1))) + batch.genealogy_ids = self \ + .with_context(active_test=False)\ + .search([("parent_path", "=like", f"{sid}/%")], order="parent_path")\ + + def _auto_init(self): + for field in self._fields.values(): + if not isinstance(field, fields.Selection) or field.column_type[0] == 'varchar': + continue + + t = field.column_type[1] + self.env.cr.execute("SELECT FROM pg_type WHERE typname = %s", [t]) + if not self.env.cr.rowcount: + self.env.cr.execute( + f"CREATE TYPE {t} AS ENUM %s", + [tuple(s for s, _ in field.selection)] + ) + + super()._auto_init() + + self.env.cr.execute(""" + CREATE INDEX IF NOT EXISTS runbot_merge_batch_ready_idx + ON runbot_merge_batch (target, priority) + WHERE blocked IS NULL; + + CREATE INDEX IF NOT EXISTS runbot_merge_batch_parent_id_idx + ON runbot_merge_batch (parent_id) + WHERE parent_id IS NOT NULL; + """) + + @api.depends('all_prs.closed') + def _compute_active(self): + for b in self: + b.active = not all(p.closed for p in b.all_prs) + + @api.depends('all_prs.closed') + def _compute_open_prs(self): + for b in self: + b.prs = b.all_prs.filtered(lambda p: not p.closed) + + def _search_open_prs(self, operator, value): + return [('all_prs', operator, value), ('active', '=', True)] + + @api.depends("prs.label") + def _compute_name(self): + for batch in self: + batch.name = batch.prs[:1].label or batch.all_prs[:1].label + + def _search_name(self, operator, value): + return [('all_prs.label', operator, value)] + + @api.depends("all_prs.target", "all_prs.closed") + def _compute_target(self): + for batch in self: + targets = batch.prs.mapped('target') or batch.all_prs.mapped('target') + batch.target = targets if len(targets) == 1 else False + + @api.depends( + "merge_date", + "prs.error", "prs.draft", "prs.squash", "prs.merge_method", + "skipchecks", + "prs.status", "prs.reviewed_by", "prs.target", + ) + def _compute_blocked(self): + for batch in self: + if batch.merge_date: + batch.blocked = "Merged." 
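+            # remaining checks below run from most to least final:
+            # all-closed, then multi-target, then per-PR blockers, then
+            # readiness; if none match, the batch is unblocked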
+ elif not batch.active: + batch.blocked = "all prs are closed" + elif len(targets := batch.prs.mapped('target')) > 1: + batch.blocked = f"Multiple target branches: {', '.join(targets.mapped('name'))!r}" + elif blocking := batch.prs.filtered( + lambda p: p.error or p.draft or not (p.squash or p.merge_method) + ): + batch.blocked = "Pull request(s) %s blocked." % ', '.join(blocking.mapped('display_name')) + elif not batch.skipchecks and (unready := batch.prs.filtered( + lambda p: not (p.reviewed_by and p.status == "success") + )): + unreviewed = ', '.join(unready.filtered(lambda p: not p.reviewed_by).mapped('display_name')) + unvalidated = ', '.join(unready.filtered(lambda p: p.status == 'pending').mapped('display_name')) + failed = ', '.join(unready.filtered(lambda p: p.status == 'failure').mapped('display_name')) + batch.blocked = "Pull request(s) %s." % ', '.join(filter(None, [ + unreviewed and f"{unreviewed} are waiting for review", + unvalidated and f"{unvalidated} are waiting for CI", + failed and f"{failed} have failed CI", + ])) + else: + if batch.blocked: + self.env.ref("runbot_merge.staging_cron")._trigger() + if batch.cancel_staging: + if splits := batch.target.split_ids: + splits.unlink() + batch.target.active_staging_id.cancel( + 'unstaged by %s becoming ready', + ', '.join(batch.prs.mapped('display_name')), + ) + batch.blocked = False + + + def _port_forward(self): + if not self: + return + + proj = self.target.project_id + if not proj.fp_github_token: + _logger.warning( + "Can not forward-port %s (%s): no token on project %s", + self, + ', '.join(self.prs.mapped('display_name')), + proj.name + ) + return + + notarget = [r.name for r in self.prs.repository if not r.fp_remote_target] + if notarget: + _logger.error( + "Can not forward-port %s (%s): repos %s don't have a forward port remote configured", + self, + ', '.join(self.prs.mapped('display_name')), + ', '.join(notarget), + ) + return + + all_sources = [(p.source_id or p) for p in self.prs] + all_targets = [p._find_next_target() for p in self.prs] + + if all(t is None for t in all_targets): + # TODO: maybe add a feedback message? + _logger.info( + "Will not forward port %s (%s): no next target", + self, + ', '.join(self.prs.mapped('display_name')) + ) + return + + PRs = self.env['runbot_merge.pull_requests'] + targets = defaultdict(lambda: PRs) + for p, t in zip(self.prs, all_targets): + if t: + targets[t] |= p + else: + _logger.info("Skip forward porting %s (of %s): no next target", p.display_name, self) + + + # all the PRs *with a next target* should have the same, we can have PRs + # stopping forward port earlier but skipping... 
probably not + if len(targets) != 1: + for t, prs in targets.items(): + linked, other = next(( + (linked, other) + for other, linkeds in targets.items() + if other != t + for linked in linkeds + )) + for pr in prs: + self.env.ref('runbot_merge.forwardport.failure.discrepancy')._send( + repository=pr.repository, + pull_request=pr.number, + token_field='fp_github_token', + format_args={'pr': pr, 'linked': linked, 'next': t.name, 'other': other.name}, + ) + _logger.warning( + "Cancelling forward-port of %s (%s): found different next branches (%s)", + self, + ', '.join(self.prs.mapped('display_name')), + ', '.join(t.name for t in targets), + ) + return + + target, prs = next(iter(targets.items())) + # this is run by the cron, no need to check if otherwise scheduled: + # either the scheduled job is this one, or it's an other scheduling + # which will run after this one and will see the port already exists + if self.search_count([('parent_id', '=', self.id), ('target', '=', target.id)]): + _logger.warning( + "Will not forward-port %s (%s): already ported", + self, + ', '.join(prs.mapped('display_name')) + ) + return + + # the base PR is the PR with the "oldest" target + base = max(all_sources, key=lambda p: (p.target.sequence, p.target.name)) + # take only the branch bit + new_branch = '%s-%s-%s-fw' % ( + target.name, + base.refname, + # avoid collisions between fp branches (labels can be reused + # or conflict especially as we're chopping off the owner) + base64.urlsafe_b64encode(os.urandom(3)).decode() + ) + conflicts = {} + for pr in prs: + repo = git.get_local(pr.repository) + conflicts[pr], head = pr._create_fp_branch(repo, target) + + repo.push(git.fw_url(pr.repository), f"{head}:refs/heads/{new_branch}") + + gh = requests.Session() + gh.headers['Authorization'] = 'token %s' % proj.fp_github_token + has_conflicts = any(conflicts.values()) + # could create a batch here but then we'd have to update `_from_gh` to + # take a batch and then `create` to not automatically resolve batches, + # easier to not do that. + new_batch = PRs.browse(()) + self.env.cr.execute('LOCK runbot_merge_pull_requests IN SHARE MODE') + for pr in prs: + owner, _ = pr.repository.fp_remote_target.split('/', 1) + source = pr.source_id or pr + root = pr.root_id + + message = source.message + '\n\n' + '\n'.join( + "Forward-Port-Of: %s" % p.display_name + for p in root | source + ) + + title, body = re.match(r'(?P<title>[^\n]+)\n*(?P<body>.*)', message, flags=re.DOTALL).groups() + r = gh.post(f'https://api.github.com/repos/{pr.repository.name}/pulls', json={ + 'base': target.name, + 'head': f'{owner}:{new_branch}', + 'title': '[FW]' + (' ' if title[0] != '[' else '') + title, + 'body': body + }) + if not r.ok: + _logger.warning("Failed to create forward-port PR for %s, deleting branches", pr.display_name) + # delete all the branches this should automatically close the + # PRs if we've created any. 
Using the API here is probably + # simpler than going through the working copies + for repo in prs.mapped('repository'): + d = gh.delete(f'https://api.github.com/repos/{repo.fp_remote_target}/git/refs/heads/{new_branch}') + if d.ok: + _logger.info("Deleting %s:%s=success", repo.fp_remote_target, new_branch) + else: + _logger.warning("Deleting %s:%s=%s", repo.fp_remote_target, new_branch, d.text) + raise RuntimeError(f"Forwardport failure: {pr.display_name} ({r.text})") + + new_pr = PRs._from_gh(r.json()) + _logger.info("Created forward-port PR %s", new_pr) + new_batch |= new_pr + + # allows PR author to close or skipci + new_pr.write({ + 'merge_method': pr.merge_method, + 'source_id': source.id, + # only link to previous PR of sequence if cherrypick passed + 'parent_id': pr.id if not has_conflicts else False, + 'detach_reason': "conflicts:\n{}".format('\n\n'.join( + f"{out}\n{err}".strip() + for _, out, err, _ in filter(None, conflicts.values()) + )) if has_conflicts else None, + }) + if has_conflicts and pr.parent_id and pr.state not in ('merged', 'closed'): + self.env.ref('runbot_merge.forwardport.failure.conflict')._send( + repository=pr.repository, + pull_request=pr.number, + token_field='fp_github_token', + format_args={'source': source, 'pr': pr, 'new': new_pr, 'footer': FOOTER}, + ) + + for pr, new_pr in zip(prs, new_batch): + new_pr._fp_conflict_feedback(pr, conflicts) + + labels = ['forwardport'] + if has_conflicts: + labels.append('conflict') + self.env['runbot_merge.pull_requests.tagging'].create({ + 'repository': new_pr.repository.id, + 'pull_request': new_pr.number, + 'tags_add': labels, + }) + + new_batch = new_batch.batch_id + new_batch.parent_id = self + # try to schedule followup + new_batch._schedule_fp_followup() + return new_batch + + def _schedule_fp_followup(self, *, force_fw=False): + _logger = logging.getLogger(__name__).getChild('forwardport.next') + # if the PR has a parent and is CI-validated, enqueue the next PR + scheduled = self.browse(()) + for batch in self: + prs = ', '.join(batch.prs.mapped('display_name')) + _logger.info('Checking if forward-port %s (%s)', batch, prs) + # in case of conflict or update, individual PRs will "lose" their + # parent, which should prevent forward porting + # + # even if we force_fw, a *followup* should still only be for forward + # ports so check that the batch has a parent (which should be the + # same thing as all the PRs having a source, kinda, but cheaper, + # it's not entirely true as technically the user could have added a + # PR to the forward ported batch) + if not (batch.parent_id and force_fw or all(p.parent_id for p in batch.prs)): + _logger.info('-> no parent %s (%s)', batch, prs) + continue + if not force_fw and batch.source.fw_policy != 'skipci' \ + and (invalid := batch.prs.filtered(lambda p: p.state not in ['validated', 'ready'])): + _logger.info( + '-> wrong state %s (%s)', + batch, + ', '.join(f"{p.display_name}: {p.state}" for p in invalid), + ) + continue + + # check if we've already forward-ported this branch + next_target = batch._find_next_targets() + if not next_target: + _logger.info("-> forward port done (no next target)") + continue + if len(next_target) > 1: + _logger.error( + "-> cancelling forward-port of %s (%s): inconsistent next target branch (%s)", + batch, + prs, + ', '.join(next_target.mapped('name')), + ) + continue + + if n := self.search([ + ('target', '=', next_target.id), + ('parent_id', '=', batch.id), + ], limit=1): + _logger.info('-> already forward-ported (%s)', n) + continue + +
_logger.info("check pending port for %s (%s)", batch, prs) + if self.env['forwardport.batches'].search_count([('batch_id', '=', batch.id)]): + _logger.warning('-> already recorded') + continue + + _logger.info('-> ok') + self.env['forwardport.batches'].create({ + 'batch_id': batch.id, + 'source': 'fp', + }) + scheduled |= batch + return scheduled + + def _find_next_target(self): + """Retrieves the next target from every PR, and returns it if it's the + same for all the PRs which have one (PRs without a next target are + ignored, this is considered acceptable). + + If the next targets are inconsistent, returns no next target. + """ + next_target = self._find_next_targets() + if len(next_target) == 1: + return next_target + else: + return self.env['runbot_merge.branch'].browse(()) + + def _find_next_targets(self): + return self.prs.mapped(lambda p: p._find_next_target() or self.env['runbot_merge.branch']) + + def write(self, vals): + if vals.get('merge_date'): + # TODO: remove condition when everything is merged + remover = self.env.get('forwardport.branch_remover') + if remover is not None: + remover.create([ + {'pr_id': p.id} + for b in self + if not b.merge_date + for p in b.prs + ]) + + if vals.get('fw_policy') == 'skipci': + nonskip = self.filtered(lambda b: b.fw_policy != 'skipci') + else: + nonskip = self.browse(()) + super().write(vals) + + # if we change the policy to skip CI, schedule followups on merged + # batches which were not previously marked as skipping CI + if nonskip: + toggled = nonskip.filtered(lambda b: b.merge_date) + tips = toggled.mapped(lambda b: b.genealogy_ids[-1:]) + for tip in tips: + tip._schedule_fp_followup() + + return True + + @api.ondelete(at_uninstall=True) + def _on_delete_clear_stagings(self): + self.batch_staging_ids.unlink() + + def unlink(self): + """ + batches can be unlinked if they: + + - have run out of PRs + - and don't have a parent batch (which is not being deleted) + - and don't have a child batch (which is not being deleted) + + this is to keep track of forward port histories at the batch level + """ + unlinkable = self.filtered( + lambda b: not (b.prs or (b.parent_id - self) or (self.search([('parent_id', '=', b.id)]) - self)) + ) + return super(Batch, unlinkable).unlink() diff --git a/runbot_merge/models/commands.py b/runbot_merge/models/commands.py new file mode 100644 index 00000000..fdee31c6 --- /dev/null +++ b/runbot_merge/models/commands.py @@ -0,0 +1,386 @@ +import enum +from collections.abc import Iterator +from dataclasses import dataclass, field +from functools import partial +from operator import contains +from typing import Callable, List, Optional, Union, Tuple + + +def tokenize(line: str) -> Iterator[str]: + cur = '' + for c in line: + if c == '-' and not cur: + yield '-' + elif c in ' \t+=,': + if cur: + yield cur + cur = '' + if not c.isspace(): + yield c + else: + cur += c + + if cur: + yield cur + + +def normalize(it: Iterator[str]) -> Iterator[str]: + """Converts shorthand tokens to expanded version + """ + for t in it: + match t: + case 'r': + yield 'review' + case 'r-': + yield 'review' + yield '-' + case _: + yield t + + +@dataclass +class Peekable(Iterator[str]): + it: Iterator[str] + memo: Optional[str] = None + + def __iter__(self) -> Iterator[str]: + return self + + def __next__(self) -> str: + if self.memo is not None: + v, self.memo = self.memo, None + return v + return next(self.it) + + def peek(self) -> Optional[str]: + if self.memo is None: + self.memo = next(self.it, None) + return self.memo + + +class 
CommandError(Exception): + pass + + +class Approve: + def __init__(self, ids: Optional[List[int]] = None) -> None: + self.ids = ids + + def __str__(self) -> str: + if self.ids is not None: + ids = ','.join(map(str, self.ids)) + return f"r={ids}" + return 'review+' + + @classmethod + def help(cls, _: bool) -> Iterator[Tuple[str, str]]: + yield "r(eview)+", "approves the PR, if it's a forwardport also approves all non-detached parents" + yield "r(eview)=<number>", "only approves the specified parents" + +class Reject: + def __str__(self) -> str: + return 'review-' + + @classmethod + def help(cls, _: bool) -> Iterator[Tuple[str, str]]: + yield "r(eview)-", "removes approval of a previously approved PR, if the PR is staged the staging will be cancelled" + +class MergeMethod(enum.Enum): + SQUASH = 'squash' + REBASE_FF = 'rebase-ff' + REBASE_MERGE = 'rebase-merge' + MERGE = 'merge' + + def __str__(self) -> str: + return self.value + + @classmethod + def help(cls, _: bool) -> Iterator[Tuple[str, str]]: + yield str(cls.MERGE), "integrate the PR with a simple merge commit, using the PR description as message" + yield str(cls.REBASE_MERGE), "rebases the PR on top of the target branch then integrates with a merge commit, using the PR description as message" + yield str(cls.REBASE_FF), "rebases the PR on top of the target branch, then fast-forwards" + yield str(cls.SQUASH), "squashes the PR as a single commit on the target branch, using the PR description as message" + + +class Retry: + def __str__(self) -> str: + return 'retry' + + @classmethod + def help(cls, _: bool) -> Iterator[Tuple[str, str]]: + yield "retry", 're-tries staging a PR in the "error" state' + + +class Check: + def __str__(self) -> str: + return 'check' + + @classmethod + def help(cls, _: bool) -> Iterator[Tuple[str, str]]: + yield "check", "fetches or refreshes PR metadata, resets mergebot state" + + +@dataclass +class Override: + statuses: List[str] = field(default_factory=list) + + def __str__(self) -> str: + return f"override={','.join(self.statuses)}" + + @classmethod + def help(cls, _: bool) -> Iterator[Tuple[str, str]]: + yield "override=<...>", "marks overridable statuses as successful" + + +@dataclass +class Delegate: + users: List[str] = field(default_factory=list) + + def __str__(self) -> str: + if not self.users: + return 'delegate+' + return f"delegate={','.join(self.users)}" + + @classmethod + def help(cls, _: bool) -> Iterator[Tuple[str, str]]: + yield "delegate+", "grants approval rights to the PR author" + yield "delegate=<...>", "grants approval rights on this PR to the specified github users" + + +class Priority(enum.Enum): + DEFAULT = enum.auto() + PRIORITY = enum.auto() + ALONE = enum.auto() + + def __str__(self) -> str: + return self.name.lower() + + @classmethod + def help(cls, _: bool) -> Iterator[Tuple[str, str]]: + yield str(cls.DEFAULT), "stages the PR normally" + yield str(cls.PRIORITY), "tries to stage this PR first, then adds `default` PRs if the staging has room" + yield str(cls.ALONE), "stages this PR only with other PRs of the same priority" + + +class CancelStaging: + def __str__(self) -> str: + return "cancel=staging" + + @classmethod + def help(cls, _: bool) -> Iterator[Tuple[str, str]]: + yield "cancel=staging", "automatically cancels the current staging when this PR becomes ready" + + +class SkipChecks: + def __str__(self) -> str: + return 'skipchecks' + + @classmethod + def help(cls, _: bool) -> Iterator[Tuple[str, str]]: + yield "skipchecks", "bypasses both statuses and review" + + +class 
FW(enum.Enum): + DEFAULT = enum.auto() + NO = enum.auto() + SKIPCI = enum.auto() + SKIPMERGE = enum.auto() + + def __str__(self) -> str: + return f'fw={self.name.lower()}' + + @classmethod + def help(cls, is_reviewer: bool) -> Iterator[Tuple[str, str]]: + yield str(cls.NO), "does not forward-port this PR" + if is_reviewer: + yield str(cls.DEFAULT), "forward-ports this PR normally" + yield str(cls.SKIPCI), "does not wait for a forward-port's statuses to succeed before creating the next one" + + +@dataclass +class Limit: + branch: Optional[str] + + def __str__(self) -> str: + if self.branch is None: + return 'ignore' + return f'up to {self.branch}' + + @classmethod + def help(cls, _: bool) -> Iterator[Tuple[str, str]]: + yield "up to <branch>", "only ports this PR forward to the specified branch (included)" + + +class Close: + def __str__(self) -> str: + return 'close' + + @classmethod + def help(cls, _: bool) -> Iterator[Tuple[str, str]]: + yield str(cls()), "closes this forward-port" + + +class Help: + def __str__(self) -> str: + return 'help' + + @classmethod + def help(cls, _: bool) -> Iterator[Tuple[str, str]]: + yield str(cls()), "displays this help" + + +Command = Union[ + Approve, + CancelStaging, + Close, + Check, + Delegate, + FW, + Help, + Limit, + MergeMethod, + Override, + Priority, + Reject, + Retry, + SkipChecks, +] + + +class Parser: + def __init__(self, line: str) -> None: + self.it = Peekable(normalize(tokenize(line))) + + def __iter__(self) -> Iterator[Command]: + for token in self.it: + if token.startswith("NOW"): + # any number of ! is allowed + if token.startswith("NOW!"): + yield Priority.ALONE + elif token == "NOW": + yield Priority.PRIORITY + else: + raise CommandError(f"unknown command {token!r}") + yield SkipChecks() + yield CancelStaging() + continue + + handler = getattr(type(self), f'parse_{token.replace("-", "_")}', None) + if handler: + yield handler(self) + elif '!' 
in token: + raise CommandError("no need to scream") + else: + raise CommandError(f"unknown command {token!r}") + + def assert_next(self, val: str) -> None: + if (actual := next(self.it, None)) != val: + raise CommandError(f"expected {val!r}, got {actual!r}") + + def check_next(self, val: str) -> bool: + if self.it.peek() == val: + self.it.memo = None # consume peeked value + return True + return False + + def parse_review(self) -> Union[Approve, Reject]: + t = next(self.it, None) + if t == '+': + return Approve() + if t == '-': + return Reject() + if t == '=': + t = next(self.it, None) + if not (t and t.isdecimal()): + raise CommandError(f"expected PR ID to approve, found {t!r}") + + ids = [int(t)] + while self.check_next(','): + id = next(self.it, None) + if id and id.isdecimal(): + ids.append(int(id)) + else: + raise CommandError(f"expected PR ID to approve, found {id!r}") + return Approve(ids) + + raise CommandError(f"unknown review {t!r}") + + def parse_squash(self) -> MergeMethod: + return MergeMethod.SQUASH + + def parse_rebase_ff(self) -> MergeMethod: + return MergeMethod.REBASE_FF + + def parse_rebase_merge(self) -> MergeMethod: + return MergeMethod.REBASE_MERGE + + def parse_merge(self) -> MergeMethod: + return MergeMethod.MERGE + + def parse_retry(self) -> Retry: + return Retry() + + def parse_check(self) -> Check: + return Check() + + def parse_override(self) -> Override: + self.assert_next('=') + ci = [next(self.it)] + while self.check_next(','): + ci.append(next(self.it)) + return Override(ci) + + def parse_delegate(self) -> Delegate: + match next(self.it, None): + case '+': + return Delegate() + case '=': + delegates = [next(self.it).lstrip('#@')] + while self.check_next(','): + delegates.append(next(self.it).lstrip('#@')) + return Delegate(delegates) + case d: + raise CommandError(f"unknown delegation {d!r}") + + def parse_default(self) -> Priority: + return Priority.DEFAULT + + def parse_priority(self) -> Priority: + return Priority.PRIORITY + + def parse_alone(self) -> Priority: + return Priority.ALONE + + def parse_cancel(self) -> CancelStaging: + self.assert_next('=') + self.assert_next('staging') + return CancelStaging() + + def parse_skipchecks(self) -> SkipChecks: + return SkipChecks() + + def parse_fw(self) -> FW: + self.assert_next('=') + f = next(self.it, "") + try: + if f in ('disable', 'disabled'): + return FW.NO + return FW[f.upper()] + except KeyError: + raise CommandError(f"unknown fw configuration {f or None!r}") from None + + def parse_ignore(self) -> Limit: + return Limit(None) + + def parse_up(self) -> Limit: + self.assert_next('to') + if limit := next(self.it, None): + return Limit(limit) + else: + raise CommandError("please provide a branch to forward-port to") + + def parse_close(self) -> Close: + return Close() + + def parse_help(self) -> Help: + return Help() diff --git a/runbot_merge/models/crons/__init__.py b/runbot_merge/models/crons/__init__.py new file mode 100644 index 00000000..f3eed923 --- /dev/null +++ b/runbot_merge/models/crons/__init__.py @@ -0,0 +1,2 @@ +from . import git_maintenance +from . 
import cleanup_scratch_branches diff --git a/runbot_merge/models/crons/cleanup_scratch_branches.py b/runbot_merge/models/crons/cleanup_scratch_branches.py new file mode 100644 index 00000000..cfe05f32 --- /dev/null +++ b/runbot_merge/models/crons/cleanup_scratch_branches.py @@ -0,0 +1,33 @@ +import logging + +from odoo import models + + +_logger = logging.getLogger(__name__) +class BranchCleanup(models.TransientModel): + _name = 'runbot_merge.branch_cleanup' + _description = "cleans up scratch refs for deactivated branches" + + def _run(self): + domain = [('active', '=', False)] + if lastcall := self.env.context['lastcall']: + domain.append(('write_date', '>=', lastcall)) + deactivated = self.env['runbot_merge.branch'].search(domain) + + _logger.info( + "deleting scratch (tmp and staging) refs for branches %s", + ', '.join(b.name for b in deactivated) + ) + # loop around the repos first, so we can reuse the gh instance + for r in deactivated.mapped('project_id.repo_ids'): + gh = r.github() + for b in deactivated: + if b.project_id != r.project_id: + continue + + res = gh('delete', f'git/refs/heads/tmp.{b.name}', check=False) + if res.status_code != 204: + _logger.info("no tmp branch found for %s:%s", r.name, b.name) + res = gh('delete', f'git/refs/heads/staging.{b.name}', check=False) + if res.status_code != 204: + _logger.info("no staging branch found for %s:%s", r.name, b.name) diff --git a/runbot_merge/models/crons/cleanup_scratch_branches.xml b/runbot_merge/models/crons/cleanup_scratch_branches.xml new file mode 100644 index 00000000..e9172f6d --- /dev/null +++ b/runbot_merge/models/crons/cleanup_scratch_branches.xml @@ -0,0 +1,23 @@ +<odoo> + <record id="access_branch_cleanup" model="ir.model.access"> + <field name="name">Access to branch cleanup is useless</field> + <field name="model_id" ref="model_runbot_merge_branch_cleanup"/> + <field name="perm_read">0</field> + <field name="perm_create">0</field> + <field name="perm_write">0</field> + <field name="perm_unlink">0</field> + </record> + + <record model="ir.cron" id="branch_cleanup"> + <field name="name">Removal of scratch refs for deactivated branches</field> + <field name="model_id" ref="model_runbot_merge_branch_cleanup"/> + <field name="state">code</field> + <field name="code">model._run()</field> + <!-- + nota: even though this is only triggered, numbercall has to be + non-zero because the counter is taken into account by cron triggers + --> + <field name="numbercall">-1</field> + <field name="doall" eval="False"/> + </record> +</odoo> diff --git a/runbot_merge/models/crons/git_maintenance.py b/runbot_merge/models/crons/git_maintenance.py new file mode 100644 index 00000000..3c34adfa --- /dev/null +++ b/runbot_merge/models/crons/git_maintenance.py @@ -0,0 +1,44 @@ +import logging +import subprocess + +from odoo import models +from ...git import get_local + + +_gc = logging.getLogger(__name__) +class GC(models.TransientModel): + _name = 'runbot_merge.maintenance' + _description = "Weekly maintenance of... cache repos?" 
+
+    def _run(self):
+        # lock out crons which use the local repo cache to avoid concurrency
+        # issues while we're GC-ing it
+        Stagings = self.env['runbot_merge.stagings']
+        crons = self.env.ref('runbot_merge.staging_cron', Stagings) | self.env.ref('forwardport.port_forward', Stagings)
+        if crons:
+            self.env.cr.execute("""
+                SELECT 1 FROM ir_cron
+                WHERE id = any(%s)
+                FOR UPDATE
+            """, [crons.ids])
+
+        # run on all repos with a forwardport target (~ forwardport enabled)
+        for repo in self.env['runbot_merge.repository'].search([]):
+            repo_git = get_local(repo, clone=False)
+            if not repo_git:
+                continue
+
+            _gc.info('Running maintenance on %s', repo.name)
+            r = repo_git\
+                .stdout(True)\
+                .with_config(stderr=subprocess.STDOUT, text=True, check=False)\
+                .remote('prune', 'origin')
+            if r.returncode:
+                _gc.warning("Prune failure (status=%d):\n%s", r.returncode, r.stdout)
+
+            r = repo_git\
+                .stdout(True)\
+                .with_config(stderr=subprocess.STDOUT, text=True, check=False)\
+                .gc('--prune=now', aggressive=True)
+            if r.returncode:
+                _gc.warning("GC failure (status=%d):\n%s", r.returncode, r.stdout)
diff --git a/runbot_merge/models/crons/git_maintenance.xml b/runbot_merge/models/crons/git_maintenance.xml
new file mode 100644
index 00000000..6190834a
--- /dev/null
+++ b/runbot_merge/models/crons/git_maintenance.xml
@@ -0,0 +1,26 @@
+<odoo>
+    <record id="access_forwardport_maintenance" model="ir.model.access">
+        <field name="name">Access to maintenance is useless</field>
+        <field name="model_id" ref="model_runbot_merge_maintenance"/>
+        <field name="perm_read">0</field>
+        <field name="perm_create">0</field>
+        <field name="perm_write">0</field>
+        <field name="perm_unlink">0</field>
+    </record>
+
+    <record model="ir.cron" id="maintenance">
+        <field name="name">Maintenance of repo cache</field>
+        <field name="model_id" ref="model_runbot_merge_maintenance"/>
+        <field name="state">code</field>
+        <field name="code">model._run()</field>
+        <!--
+        run Sunday morning as it can take a while; it's unlikely anyone will
+        need to stage or forward-port anything at that point
+        -->
+        <field name="nextcall" eval="datetime.utcnow() + relativedelta(weekday=6, hour=2, minute=0, second=0, microsecond=0)"/>
+        <field name="interval_number">1</field>
+        <field name="interval_type">weeks</field>
+        <field name="numbercall">-1</field>
+        <field name="doall" eval="False"/>
+    </record>
+</odoo>
diff --git a/runbot_merge/models/events_sources.py b/runbot_merge/models/events_sources.py
new file mode 100644
index 00000000..34db75ac
--- /dev/null
+++ b/runbot_merge/models/events_sources.py
@@ -0,0 +1,12 @@
+from odoo import models, fields
+
+
+class EventsSources(models.Model):
+    _name = 'runbot_merge.events_sources'
+    _description = 'Valid Webhook Event Sources'
+    _order = "repository"
+    _rec_name = "repository"
+
+    # FIXME: unique repo? Or allow multiple secrets per repo?
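+    # one record per repository (full name), holding the secret used to
+    # validate the webhooks it sends; this replaces the project-level
+    # ``secret`` field removed from project.py below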
+ repository = fields.Char(index=True, required=True) + secret = fields.Char() diff --git a/runbot_merge/models/mail_thread.py b/runbot_merge/models/mail_thread.py new file mode 100644 index 00000000..85cfbb12 --- /dev/null +++ b/runbot_merge/models/mail_thread.py @@ -0,0 +1,33 @@ +from collections import ChainMap + +from odoo import models +from odoo.tools import ConstantMapping + + +class MailThread(models.AbstractModel): + _inherit = 'mail.thread' + + def _message_compute_author(self, author_id=None, email_from=None, raise_on_email=True): + if author_id is None and self: + mta = self.env.cr.precommit.data.get(f'mail.tracking.author.{self._name}', {}) + authors = self.env['res.partner'].union(*(p for r in self if (p := mta.get(r.id)))) + if len(authors) == 1: + author_id = authors.id + v = super()._message_compute_author(author_id, email_from, raise_on_email) + return v + + def _track_set_author(self, author, *, fallback=False): + """ Set the author of the tracking message. """ + if not self._track_get_fields(): + return + authors = self.env.cr.precommit.data.setdefault(f'mail.tracking.author.{self._name}', {}) + if fallback: + details = authors + if isinstance(authors, ChainMap): + details = authors.maps[0] + self.env.cr.precommit.data[f'mail.tracking.author.{self._name}'] = ChainMap( + details, + ConstantMapping(author), + ) + else: + return super()._track_set_author(author) diff --git a/runbot_merge/models/project.py b/runbot_merge/models/project.py index b484cdcf..e36ad0c5 100644 --- a/runbot_merge/models/project.py +++ b/runbot_merge/models/project.py @@ -1,7 +1,14 @@ import logging import re +from typing import List -from odoo import models, fields +import requests +import sentry_sdk + +from odoo import models, fields, api +from odoo.exceptions import UserError +from odoo.osv import expression +from odoo.tools import reverse_order _logger = logging.getLogger(__name__) class Project(models.Model): @@ -19,6 +26,14 @@ class Project(models.Model): help="Branches of all project's repos which are managed by the merge bot. Also "\ "target branches of PR this project handles." ) + staging_enabled = fields.Boolean(default=True) + staging_priority = fields.Selection([ + ('default', "Splits over ready PRs"), + ('largest', "Largest of split and ready PRs"), + ('ready', "Ready PRs over split"), + ], default="default", required=True) + staging_statuses = fields.Boolean(default=True) + staging_rpc = fields.Boolean(default=False) ci_timeout = fields.Integer( default=60, required=True, group_operator=None, @@ -26,30 +41,92 @@ class Project(models.Model): ) github_token = fields.Char("Github Token", required=True) + github_name = fields.Char(store=True, compute="_compute_identity") + github_email = fields.Char(store=True, compute="_compute_identity") github_prefix = fields.Char( required=True, default="hanson", # mergebot du bot du bot du~ help="Prefix (~bot name) used when sending commands from PR " - "comments e.g. [hanson retry] or [hanson r+ p=1]" + "comments e.g. [hanson retry] or [hanson r+ priority]", ) + fp_github_token = fields.Char() + fp_github_name = fields.Char(store=True, compute="_compute_git_identity") batch_limit = fields.Integer( default=8, group_operator=None, help="Maximum number of PRs staged together") - secret = fields.Char( - help="Webhook secret. If set, will be checked against the signature " - "of (valid) incoming webhook signatures, failing signatures " - "will lead to webhook rejection. Should only use ASCII." 
-    )
-
     freeze_id = fields.Many2one('runbot_merge.project.freeze', compute='_compute_freeze')
     freeze_reminder = fields.Text()
 
-    def _check_stagings(self, commit=False):
-        for branch in self.search([]).mapped('branch_ids').filtered('active'):
-            staging = branch.active_staging_id
-            if not staging:
+    uniquifier = fields.Boolean(
+        default=True,
+        help="Whether to add a uniquifier commit on repositories without PRs"
+             " during staging. The lack of a uniquifier can lead to CI"
+             " conflicts as github works off of commits, so it's possible for"
+             " an unrelated build to trigger a failure if somebody includes"
+             " repos they have no commit for."
+    )
+
+    @api.depends('github_token')
+    def _compute_identity(self):
+        s = requests.Session()
+        for project in self:
+            if not project.github_token or (project.github_name and project.github_email):
                 continue
+
+            r0 = s.get('https://api.github.com/user', headers={
+                'Authorization': 'token %s' % project.github_token
+            })
+            if not r0.ok:
+                _logger.error("Failed to fetch merge bot information for project %s: %s", project.name, r0.text or r0.content)
+                continue
+
+            r = r0.json()
+            project.github_name = r['name'] or r['login']
+            if email := r['email']:
+                project.github_email = email
+                continue
+
+            if 'user:email' not in set(re.split(r',\s*', r0.headers['x-oauth-scopes'])):
+                raise UserError("The merge bot github token needs the user:email scope to fetch the bot's identity.")
+            r1 = s.get('https://api.github.com/user/emails', headers={
+                'Authorization': 'token %s' % project.github_token
+            })
+            if not r1.ok:
+                _logger.error("Failed to fetch merge bot emails for project %s: %s", project.name, r1.text or r1.content)
+                continue
+            project.github_email = next((
+                entry['email']
+                for entry in r1.json()
+                if entry['primary']
+            ), None)
+            if not project.github_email:
+                raise UserError("The merge bot needs a public or accessible primary email set up.")
+
+    # technically the email could change at any moment...
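+    # (the forward-port bot identity below is resolved the same way, though
+    # only the name is needed: there is no fp_github_email counterpart)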
+ @api.depends('fp_github_token') + def _compute_git_identity(self): + s = requests.Session() + for project in self: + if project.fp_github_name or not project.fp_github_token: + continue + + r0 = s.get('https://api.github.com/user', headers={ + 'Authorization': 'token %s' % project.fp_github_token + }) + if not r0.ok: + _logger.error("Failed to fetch forward bot information for project %s: %s", project.name, r0.text or r0.content) + continue + + user = r0.json() + project.fp_github_name = user['name'] or user['login'] + + def _check_stagings(self, commit=False): + # check branches with an active staging + for branch in self.env['runbot_merge.branch']\ + .with_context(active_test=False)\ + .search([('active_staging_id', '!=', False)]): + staging = branch.active_staging_id try: with self.env.cr.savepoint(): staging.check_status() @@ -61,23 +138,41 @@ class Project(models.Model): self.env.cr.commit() def _create_stagings(self, commit=False): - for branch in self.search([]).mapped('branch_ids').filtered('active'): - if not branch.active_staging_id: - try: - with self.env.cr.savepoint(): - branch.try_staging() - except Exception: - _logger.exception("Failed to create staging for branch %r", branch.name) - else: - if commit: - self.env.cr.commit() + from .stagings_create import try_staging - def _find_commands(self, comment): + # look up branches which can be staged on and have no active staging + for branch in self.env['runbot_merge.branch'].search([ + ('active_staging_id', '=', False), + ('active', '=', True), + ('staging_enabled', '=', True), + ('project_id.staging_enabled', '=', True), + ]): + try: + with self.env.cr.savepoint(), \ + sentry_sdk.start_span(description=f'create staging {branch.name}') as span: + span.set_tag('branch', branch.name) + try_staging(branch) + except Exception: + _logger.exception("Failed to create staging for branch %r", branch.name) + else: + if commit: + self.env.cr.commit() + + def _find_commands(self, comment: str) -> List[str]: + """Tries to find all the lines starting (ignoring leading whitespace) + with either the merge or the forward port bot identifiers. + + For convenience, the identifier *can* be prefixed with an ``@`` or + ``#``, and suffixed with a ``:``. + """ + # horizontal whitespace (\s - {\n, \r}), but Python doesn't have \h or \p{Blank} + h = r'[^\S\r\n]' return re.findall( - r'^\s*[@|#]?{}:? 
(.*)$'.format(self.github_prefix), + fr'^{h}*[@|#]?{self.github_prefix}(?:{h}+|:{h}*)(.*)$', comment, re.MULTILINE | re.IGNORECASE) def _has_branch(self, name): + self.env['runbot_merge.branch'].flush_model(['project_id', 'name']) self.env.cr.execute(""" SELECT 1 FROM runbot_merge_branch WHERE project_id = %s AND name = %s @@ -121,3 +216,10 @@ class Project(models.Model): ] }) return w.action_open() + + def _forward_port_ordered(self, domain=()): + Branches = self.env['runbot_merge.branch'] + return Branches.search(expression.AND([ + [('project_id', '=', self.id)], + domain or [], + ]), order=reverse_order(Branches._order)) diff --git a/runbot_merge/models/project_freeze/__init__.py b/runbot_merge/models/project_freeze/__init__.py index c6bc4330..5699314f 100644 --- a/runbot_merge/models/project_freeze/__init__.py +++ b/runbot_merge/models/project_freeze/__init__.py @@ -1,18 +1,19 @@ -import contextlib import enum import itertools import json import logging -import time from collections import Counter +from typing import Dict from markupsafe import Markup from odoo import models, fields, api, Command -from odoo.addons.runbot_merge.exceptions import FastForwardError from odoo.exceptions import UserError from odoo.tools import drop_view_if_exists +from ... import git +from ..pull_requests import Repository + _logger = logging.getLogger(__name__) class FreezeWizard(models.Model): _name = 'runbot_merge.project.freeze' @@ -177,11 +178,13 @@ class FreezeWizard(models.Model): if self.errors: return self.action_open() - conflict_crons = self.env.ref('runbot_merge.merge_cron') | self.env.ref('runbot_merge.staging_cron') + conflict_crons = self.env.ref('runbot_merge.merge_cron')\ + | self.env.ref('runbot_merge.staging_cron')\ + | self.env.ref('runbot_merge.process_updated_commits') # we don't want to run concurrently to the crons above, though we # don't need to prevent read access to them self.env.cr.execute( - 'SELECT * FROM ir_cron WHERE id =ANY(%s) FOR SHARE NOWAIT', + 'SELECT FROM ir_cron WHERE id =ANY(%s) FOR SHARE NOWAIT', [conflict_crons.ids] ) @@ -190,6 +193,12 @@ class FreezeWizard(models.Model): # everything so the new branch is the second one, just after the branch # it "forks" master, rest = project_id.branch_ids[0], project_id.branch_ids[1:] + if self.bump_pr_ids and master.active_staging_id: + self.env.cr.execute( + 'SELECT FROM runbot_merge_stagings WHERE id = %s FOR UPDATE NOWAIT', + [master.active_staging_id.id] + ) + seq = itertools.count(start=1) # start reseq at 1 commands = [ (1, master.id, {'sequence': next(seq)}), @@ -203,50 +212,65 @@ class FreezeWizard(models.Model): master_name = master.name gh_sessions = {r: r.github() for r in self.project_id.repo_ids} + repos: Dict[Repository, git.Repo] = { + r: git.get_local(r).check(False) + for r in self.project_id.repo_ids + } + for repo, copy in repos.items(): + copy.fetch(git.source_url(repo), '+refs/heads/*:refs/heads/*') + all_prs = self.release_pr_ids.pr_id | self.bump_pr_ids.pr_id + for pr in all_prs: + repos[pr.repository].fetch( + git.source_url(pr.repository), + pr.head, + ) # prep new branch (via tmp refs) on every repo - rel_heads = {} + rel_heads: Dict[Repository, str] = {} # store for master heads as odds are high the bump pr(s) will be on the # same repo as one of the release PRs - prevs = {} + prevs: Dict[Repository, str] = {} for rel in self.release_pr_ids: repo_id = rel.repository_id gh = gh_sessions[repo_id] try: prev = prevs[repo_id] = gh.head(master_name) - except Exception: - raise UserError(f"Unable to resolve 
branch {master_name} of repository {repo_id.name} to a commit.") + except Exception as e: + raise UserError(f"Unable to resolve branch {master_name} of repository {repo_id.name} to a commit.") from e - # create the tmp branch to merge the PR into - tmp_branch = f'tmp.{self.branch_name}' try: - gh.set_ref(tmp_branch, prev) - except Exception as err: - raise UserError(f"Unable to create branch {self.branch_name} of repository {repo_id.name}: {err}.") + commits = gh.commits(rel.pr_id.number) + except Exception as e: + raise UserError(f"Unable to fetch commits of release PR {rel.pr_id.display_name}.") from e - rel_heads[repo_id], _ = gh.rebase(rel.pr_id.number, tmp_branch) - time.sleep(1) + _logger.debug("rebasing %s on %s (commits=%s)", + rel.pr_id.display_name, prev, len(commits)) + rel_heads[repo_id] = repos[repo_id].rebase(prev, commits)[0] # prep bump - bump_heads = {} + bump_heads: Dict[Repository, str] = {} for bump in self.bump_pr_ids: repo_id = bump.repository_id gh = gh_sessions[repo_id] try: prev = prevs[repo_id] = prevs.get(repo_id) or gh.head(master_name) - except Exception: - raise UserError(f"Unable to resolve branch {master_name} of repository {repo_id.name} to a commit.") + except Exception as e: + raise UserError(f"Unable to resolve branch {master_name} of repository {repo_id.name} to a commit.") from e - # create the tmp branch to merge the PR into - tmp_branch = f'tmp.{master_name}' try: - gh.set_ref(tmp_branch, prev) - except Exception as err: - raise UserError(f"Unable to create branch {master_name} of repository {repo_id.name}: {err}.") + commits = gh.commits(bump.pr_id.number) + except Exception as e: + raise UserError(f"Unable to fetch commits of bump PR {bump.pr_id.display_name}.") from e - bump_heads[repo_id], _ = gh.rebase(bump.pr_id.number, tmp_branch) - time.sleep(1) + _logger.debug("rebasing %s on %s (commits=%s)", + bump.pr_id.display_name, prev, len(commits)) + bump_heads[repo_id] = repos[repo_id].rebase(prev, commits)[0] + + # prevent concurrent updates to the commits table so we control the + # creation of commit objects from rebasing the release & bump PRs, do it + # only just before *pushing* + self.env.cr.execute("LOCK runbot_merge_commit IN ACCESS EXCLUSIVE MODE NOWAIT") deployed = {} # at this point we've got a bunch of tmp branches with merged release @@ -256,38 +280,39 @@ class FreezeWizard(models.Model): failure = None for rel in self.release_pr_ids: repo_id = rel.repository_id - # helper API currently has no API to ensure we're just creating a - # new branch (as cheaply as possible) so do it by hand - status = None - with contextlib.suppress(Exception): - status = gh_sessions[repo_id].create_ref(self.branch_name, rel_heads[repo_id]) - deployed[rel.pr_id.id] = rel_heads[repo_id] - to_delete.append(repo_id) - if status != 201: + if repos[repo_id].push( + git.source_url(repo_id), + f'{rel_heads[repo_id]}:refs/heads/{self.branch_name}', + ).returncode: failure = ('create', repo_id.name, self.branch_name) break + + deployed[rel.pr_id.id] = rel_heads[repo_id] + to_delete.append(repo_id) else: # all release deployments succeeded for bump in self.bump_pr_ids: repo_id = bump.repository_id - try: - gh_sessions[repo_id].fast_forward(master_name, bump_heads[repo_id]) - deployed[bump.pr_id.id] = bump_heads[repo_id] - to_revert.append(repo_id) - except FastForwardError: + if repos[repo_id].push( + git.source_url(repo_id), + f'{bump_heads[repo_id]}:refs/heads/{master_name}' + ).returncode: failure = ('fast-forward', repo_id.name, master_name) break + 
deployed[bump.pr_id.id] = bump_heads[repo_id] + to_revert.append(repo_id) + if failure: addendums = [] # creating the branch failed, try to delete all previous branches failures = [] for prev_id in to_revert: - revert = gh_sessions[prev_id]('PATCH', f'git/refs/heads/{master_name}', json={ - 'sha': prevs[prev_id], - 'force': True - }, check=False) - if not revert.ok: + if repos[prev_id].push( + '-f', + git.source_url(prev_id), + f'{prevs[prev_id]}:refs/heads/{master_name}', + ).returncode: failures.append(prev_id.name) if failures: addendums.append( @@ -297,8 +322,10 @@ class FreezeWizard(models.Model): failures.clear() for prev_id in to_delete: - deletion = gh_sessions[prev_id]('DELETE', f'git/refs/heads/{self.branch_name}', check=False) - if not deletion.ok: + if repos[prev_id].push( + git.source_url(prev_id), + f':refs/heads/{self.branch_name}' + ).returncode: failures.append(prev_id.name) if failures: addendums.append( @@ -317,8 +344,83 @@ class FreezeWizard(models.Model): f"Unable to {reason} branch {repo}:{branch}.{addendum}" ) - all_prs = self.release_pr_ids.pr_id | self.bump_pr_ids.pr_id - all_prs.state = 'merged' + b = self.env['runbot_merge.branch'].search([('name', '=', self.branch_name)]) + # We specifically don't want to modified() or anything. + self.env.cr.execute( + "UPDATE runbot_merge_batch SET target=%s WHERE id = %s;" + "UPDATE runbot_merge_pull_requests SET target=%s WHERE id = any(%s)", + [ + b.id, self.release_pr_ids.pr_id.batch_id.id, + b.id, self.release_pr_ids.pr_id.ids, + ] + ) + all_prs.batch_id.merge_date = fields.Datetime.now() + all_prs.reviewed_by = self.env.user.partner_id.id + for p in all_prs: + p.commits_map = json.dumps({ + '': deployed[p.id], + p.head: deployed[p.id] + }) + + # stagings have to be created conditionally as otherwise we might not + # have a `target` to set and it's mandatory + laster = self.env['runbot_merge.stagings'].search( + [('target', '=', master.id), ('state', '=', 'success')], + order='id desc', + limit=1, + ).commits.mapped(lambda c: (c.repository_id, c.commit_id)) + if self.release_pr_ids: + rel_items = [(0, 0, { + 'repository_id': repo.id, + 'commit_id': self.env['runbot_merge.commit'].create({ + 'sha': sha, + 'to_check': False, + }).id, + } if (sha := rel_heads.get(repo)) else { + 'repository_id': repo.id, + 'commit_id': commit.id, + }) + for repo, commit in laster + ] + self.env['runbot_merge.stagings'].create([{ + 'state': 'success', + 'reason': 'release freeze staging', + 'active': False, + 'target': b.id, + 'staging_batch_ids': [ + (0, 0, {'runbot_merge_batch_id': batch.id}) + for batch in self.release_pr_ids.pr_id.batch_id + ], + 'heads': rel_items, + 'commits': rel_items, + }]) + + if self.bump_pr_ids: + bump_items = [(0, 0, { + 'repository_id': repo.id, + 'commit_id': self.env['runbot_merge.commit'].create({ + 'sha': sha, + 'to_check': False, + }).id, + } if (sha := bump_heads.get(repo)) else { + 'repository_id': repo.id, + 'commit_id': commit.id, + }) + for repo, commit in laster + ] + self.env['runbot_merge.stagings'].create([{ + 'state': 'success', + 'reason': 'bump freeze staging', + 'active': False, + 'target': master.id, + 'staging_batch_ids': [ + (0, 0, {'runbot_merge_batch_id': batch.id}) + for batch in self.bump_pr_ids.pr_id.batch_id + ], + 'heads': bump_items, + 'commits': bump_items, + }]) + self.env['runbot_merge.pull_requests.feedback'].create([{ 'repository': pr.repository.id, 'pull_request': pr.number, @@ -460,7 +562,7 @@ class OpenPRLabels(models.Model): def init(self): super().init() - 
drop_view_if_exists(self.env.cr, "runbot_merge_freeze_labels"); + drop_view_if_exists(self.env.cr, "runbot_merge_freeze_labels") self.env.cr.execute(""" CREATE VIEW runbot_merge_freeze_labels AS ( SELECT DISTINCT ON (label) diff --git a/runbot_merge/models/project_freeze/views.xml b/runbot_merge/models/project_freeze/views.xml index a2f8bfc3..d243edcd 100644 --- a/runbot_merge/models/project_freeze/views.xml +++ b/runbot_merge/models/project_freeze/views.xml @@ -6,7 +6,7 @@ <form js_class="freeze_wizard"> <sheet> <div class="alert alert-warning" role="alert" - attrs="{'invisible': [('errors', '=', False)]}"> + invisible="not errors"> <field name="errors" readonly="True"/> </div> <group> @@ -59,9 +59,9 @@ the style of the button if the form has "no errors" --> <button string="Freeze" type="object" name="action_freeze" - class="btn-success" attrs="{'invisible': [('errors', '!=', False)]}"/> + class="btn-success" invisible="errors"/> <button string="Freeze" type="object" name="action_freeze" - class="btn-primary" attrs="{'invisible': [('errors', '=', False)]}"/> + class="btn-primary" invisible="not errors"/> <button string="Save & Close" special="save"/> <button string="Cancel" type="object" name="action_cancel" class="btn-warning"/> </footer> diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py index 8be1070b..7d2eddcd 100644 --- a/runbot_merge/models/pull_requests.py +++ b/runbot_merge/models/pull_requests.py @@ -1,36 +1,34 @@ -# coding: utf-8 +from __future__ import annotations import ast -import base64 import collections import contextlib import datetime -import io import itertools import json import logging -import os -import pprint import re import time +from functools import reduce +from operator import itemgetter +from typing import Optional, Union, List, Iterator, Tuple -from difflib import Differ -from itertools import takewhile - -import requests +import psycopg2.errors +import sentry_sdk import werkzeug -from werkzeug.datastructures import Headers +from markupsafe import Markup -from odoo import api, fields, models, tools -from odoo.exceptions import ValidationError +from odoo import api, fields, models, tools, Command +from odoo.exceptions import AccessError, UserError from odoo.osv import expression -from odoo.tools import OrderedSet +from odoo.tools import html_escape, Reverse +from . import commands +from .utils import enum, readonly, dfm from .. import github, exceptions, controllers, utils -WAIT_FOR_VISIBILITY = [10, 10, 10, 10] - _logger = logging.getLogger(__name__) +FOOTER = '\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n' class StatusConfiguration(models.Model): @@ -65,9 +63,11 @@ class Repository(models.Model): _name = _description = 'runbot_merge.repository' _order = 'sequence, id' + id: int + sequence = fields.Integer(default=50, group_operator=None) name = fields.Char(required=True) - project_id = fields.Many2one('runbot_merge.project', required=True) + project_id = fields.Many2one('runbot_merge.project', required=True, index=True) status_ids = fields.One2many('runbot_merge.repository.status', 'repo_id', string="Required Statuses") group_id = fields.Many2one('res.groups', default=lambda self: self.env.ref('base.group_user')) @@ -80,15 +80,16 @@ class Repository(models.Model): All substitutions are tentatively applied sequentially to the input. 
""") - @api.model - def create(self, vals): - if 'status_ids' in vals: - return super().create(vals) + @api.model_create_multi + def create(self, vals_list): + for vals in vals_list: + if 'status_ids' in vals: + continue - st = vals.pop('required_statuses', 'legal/cla,ci/runbot') - if st: - vals['status_ids'] = [(0, 0, {'context': c}) for c in st.split(',')] - return super().create(vals) + st = vals.pop('required_statuses', 'legal/cla,ci/runbot') + if st: + vals['status_ids'] = [(0, 0, {'context': c}) for c in st.split(',')] + return super().create(vals_list) def write(self, vals): st = vals.pop('required_statuses', None) @@ -96,7 +97,7 @@ All substitutions are tentatively applied sequentially to the input. vals['status_ids'] = [(5, 0, {})] + [(0, 0, {'context': c}) for c in st.split(',')] return super().write(vals) - def github(self, token_field='github_token'): + def github(self, token_field='github_token') -> github.GH: return github.GH(self.project_id[token_field], self.name) def _auto_init(self): @@ -105,26 +106,30 @@ All substitutions are tentatively applied sequentially to the input. self._cr, 'runbot_merge_unique_repo', self._table, ['name']) return res - def _load_pr(self, number): + def _load_pr(self, number, *, closing=False): gh = self.github() # fetch PR object and handle as *opened* issue, pr = gh.pr(number) - feedback = self.env['runbot_merge.pull_requests.feedback'].create + repo_name = pr['base']['repo']['full_name'] if not self.project_id._has_branch(pr['base']['ref']): - _logger.info("Tasked with loading PR %d for un-managed branch %s:%s, ignoring", - number, self.name, pr['base']['ref']) - feedback({ - 'repository': self.id, - 'pull_request': number, - 'message': "Branch `{}` is not within my remit, imma just ignore it.".format(pr['base']['ref']), - }) + _logger.info("Tasked with loading %s PR %s#%d for un-managed branch %s:%s, ignoring", + pr['state'], repo_name, number, self.name, pr['base']['ref']) + if not closing: + self.env.ref('runbot_merge.pr.load.unmanaged')._send( + repository=self, + pull_request=number, + format_args = { + 'pr': pr, + 'repository': self, + }, + ) return # if the PR is already loaded, force sync a few attributes pr_id = self.env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', pr['base']['repo']['full_name']), + ('repository.name', '=', repo_name), ('number', '=', number), ]) if pr_id: @@ -143,20 +148,30 @@ All substitutions are tentatively applied sequentially to the input. }, 'sender': {'login': self.project_id.github_prefix}, }) - feedback({ + edit2 = '' + if pr_id.draft != pr['draft']: + edit2 = controllers.handle_pr(self.env, { + 'action': 'converted_to_draft' if pr['draft'] else 'ready_for_review', + 'pull_request': pr, + 'sender': {'login': self.project_id.github_prefix} + }) + '. ' + if pr_id.state != 'closed' and pr['state'] == 'closed': + # don't go through controller because try_closing does weird things + # for safety / race condition reasons which ends up committing + # and breaks everything + pr_id.state = 'closed' + self.env['runbot_merge.pull_requests.feedback'].create({ 'repository': pr_id.repository.id, 'pull_request': number, - 'message': f"{edit}. {sync}.", + 'message': f"{edit}. {edit2}{sync}.", }) return - feedback({ - 'repository': self.id, - 'pull_request': number, - 'message': "%sI didn't know about this PR and had to retrieve " - "its information, you may have to re-approve it as " - "I didn't see previous commands." 
% pr_id.ping() - }) + # special case for closed PRs, just ignore all events and skip feedback + if closing: + self.env['runbot_merge.pull_requests']._from_gh(pr) + return + sender = {'login': self.project_id.github_prefix} # init the PR to the null commit so we can later synchronise it back # back to the "proper" head while resetting reviews @@ -205,14 +220,21 @@ All substitutions are tentatively applied sequentially to the input. 'pull_request': pr, 'sender': sender, }) + pr_id = self.env['runbot_merge.pull_requests'].search([ + ('repository.name', '=', repo_name), + ('number', '=', number), + ]) if pr['state'] == 'closed': # don't go through controller because try_closing does weird things # for safety / race condition reasons which ends up committing # and breaks everything - self.env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', pr['base']['repo']['full_name']), - ('number', '=', number), - ]).state = 'closed' + pr_id.closed = True + + self.env.ref('runbot_merge.pr.load.fetched')._send( + repository=self, + pull_request=number, + format_args={'pr': pr_id}, + ) def having_branch(self, branch): branches = self.env['runbot_merge.branch'].search @@ -234,11 +256,13 @@ class Branch(models.Model): _name = _description = 'runbot_merge.branch' _order = 'sequence, name' + id: int + name = fields.Char(required=True) - project_id = fields.Many2one('runbot_merge.project', required=True) + project_id = fields.Many2one('runbot_merge.project', required=True, index=True) active_staging_id = fields.Many2one( - 'runbot_merge.stagings', compute='_compute_active_staging', store=True, + 'runbot_merge.stagings', compute='_compute_active_staging', store=True, index=True, help="Currently running staging for the branch." ) staging_ids = fields.One2many('runbot_merge.stagings', 'target') @@ -252,6 +276,8 @@ class Branch(models.Model): active = fields.Boolean(default=True) sequence = fields.Integer(group_operator=None) + staging_enabled = fields.Boolean(default=True) + def _auto_init(self): res = super(Branch, self)._auto_init() tools.create_unique_index( @@ -259,24 +285,25 @@ class Branch(models.Model): self._table, ['name', 'project_id']) return res - @api.depends('active') + @api.depends('name', 'active', 'project_id.name') def _compute_display_name(self): - super()._compute_display_name() - for b in self.filtered(lambda b: not b.active): - b.display_name += ' (inactive)' + for b in self: + b.display_name = f"{b.project_id.name}:{b.name}" + ('' if b.active else ' (inactive)') def write(self, vals): - super().write(vals) - if vals.get('active') is False: - self.active_staging_id.cancel( + if vals.get('active') is False and (actives := self.filtered('active')): + actives.active_staging_id.cancel( "Target branch deactivated by %r.", self.env.user.login, ) + tmpl = self.env.ref('runbot_merge.pr.branch.disabled') self.env['runbot_merge.pull_requests.feedback'].create([{ 'repository': pr.repository.id, 'pull_request': pr.number, - 'message': f'Hey {pr.ping()}the target branch {pr.target.name!r} has been disabled, you may want to close this PR.', - } for pr in self.prs]) + 'message': tmpl._format(pr=pr), + } for pr in actives.prs]) + self.env.ref('runbot_merge.branch_cleanup')._trigger() + super().write(vals) return True @api.depends('staging_ids.active') @@ -284,230 +311,49 @@ class Branch(models.Model): for b in self: b.active_staging_id = b.with_context(active_test=True).staging_ids - def _ready(self): - self.env.cr.execute(""" - SELECT - min(pr.priority) as priority, - array_agg(pr.id) AS 
match - FROM runbot_merge_pull_requests pr - WHERE pr.target = any(%s) - -- exclude terminal states (so there's no issue when - -- deleting branches & reusing labels) - AND pr.state != 'merged' - AND pr.state != 'closed' - GROUP BY - pr.target, - CASE - WHEN pr.label SIMILAR TO '%%:patch-[[:digit:]]+' - THEN pr.id::text - ELSE pr.label - END - HAVING - bool_or(pr.state = 'ready') or bool_or(pr.priority = 0) - ORDER BY min(pr.priority), min(pr.id) - """, [self.ids]) - browse = self.env['runbot_merge.pull_requests'].browse - return [(p, browse(ids)) for p, ids in self.env.cr.fetchall()] - def _stageable(self): - return [ - (p, prs) - for p, prs in self._ready() - if not any(prs.mapped('blocked')) - ] +class SplitOffWizard(models.TransientModel): + _name = "runbot_merge.pull_requests.split_off" + _description = "wizard to split a PR off of its current batch and into a different one" - def try_staging(self): - """ Tries to create a staging if the current branch does not already - have one. Returns None if the branch already has a staging or there - is nothing to stage, the newly created staging otherwise. - """ - logger = _logger.getChild('cron') + pr_id = fields.Many2one("runbot_merge.pull_requests", required=True) + new_label = fields.Char(string="New Label") - logger.info( - "Checking %s (%s) for staging: %s, skip? %s", - self, self.name, - self.active_staging_id, - bool(self.active_staging_id) - ) - if self.active_staging_id: - return + def button_apply(self): + self.pr_id._split_off(self.new_label) + self.unlink() + return {'type': 'ir.actions.act_window_close'} - rows = self._stageable() - priority = rows[0][0] if rows else -1 - if priority == 0 or priority == 1: - # p=0 take precedence over all else - # p=1 allows merging a fix inside / ahead of a split (e.g. 
branch - # is broken or widespread false positive) without having to cancel - # the existing staging - batched_prs = [pr_ids for _, pr_ids in takewhile(lambda r: r[0] == priority, rows)] - elif self.split_ids: - split_ids = self.split_ids[0] - logger.info("Found split of PRs %s, re-staging", split_ids.mapped('batch_ids.prs')) - batched_prs = [batch.prs for batch in split_ids.batch_ids] - split_ids.unlink() - else: # p=2 - batched_prs = [pr_ids for _, pr_ids in takewhile(lambda r: r[0] == priority, rows)] - - if not batched_prs: - return - - Batch = self.env['runbot_merge.batch'] - staged = Batch - original_heads = {} - meta = {repo: {} for repo in self.project_id.repo_ids.having_branch(self)} - for repo, it in meta.items(): - gh = it['gh'] = repo.github() - it['head'] = original_heads[repo] = gh.head(self.name) - # create tmp staging branch - gh.set_ref('tmp.{}'.format(self.name), it['head']) - - batch_limit = self.project_id.batch_limit - first = True - for batch in batched_prs: - if len(staged) >= batch_limit: - break - try: - staged |= Batch.stage(meta, batch) - except exceptions.MergeError as e: - pr = e.args[0] - _logger.exception("Failed to merge %s into staging branch", pr.display_name) - if first or isinstance(e, exceptions.Unmergeable): - if len(e.args) > 1 and e.args[1]: - reason = e.args[1] - else: - reason = e.__context__ - # if the reason is a json document, assume it's a github - # error and try to extract the error message to give it to - # the user - with contextlib.suppress(Exception): - reason = json.loads(str(reason))['message'].lower() - - pr.state = 'error' - self.env['runbot_merge.pull_requests.feedback'].create({ - 'repository': pr.repository.id, - 'pull_request': pr.number, - 'message': f'{pr.ping()}unable to stage: {reason}', - }) - else: - first = False - - if not staged: - return - - heads = {} - for repo, it in meta.items(): - tree = it['gh'].commit(it['head'])['tree'] - # ensures staging branches are unique and always - # rebuilt - r = base64.b64encode(os.urandom(12)).decode('ascii') - trailer = '' - if heads: - trailer = '\n'.join( - 'Runbot-dependency: %s:%s' % (repo, h) - for repo, h in heads.items() - if not repo.endswith('^') - ) - dummy_head = {'sha': it['head']} - if it['head'] == original_heads[repo]: - # if the repo has not been updated by the staging, create a - # dummy commit to force rebuild - dummy_head = it['gh']('post', 'git/commits', json={ - 'message': '''force rebuild - -uniquifier: %s -For-Commit-Id: %s -%s''' % (r, it['head'], trailer), - 'tree': tree['sha'], - 'parents': [it['head']], - }).json() - - # $repo is the head to check, $repo^ is the head to merge (they - # might be the same) - heads[repo.name + '^'] = it['head'] - heads[repo.name] = dummy_head['sha'] - self.env.cr.execute( - "INSERT INTO runbot_merge_commit (sha, to_check, statuses) " - "VALUES (%s, true, '{}') " - "ON CONFLICT (sha) DO UPDATE SET to_check=true", - [dummy_head['sha']] - ) - - # create actual staging object - st = self.env['runbot_merge.stagings'].create({ - 'target': self.id, - 'batch_ids': [(4, batch.id, 0) for batch in staged], - 'heads': json.dumps(heads) - }) - # create staging branch from tmp - token = self.project_id.github_token - for r in self.project_id.repo_ids.having_branch(self): - it = meta[r] - staging_head = heads[r.name] - _logger.info( - "%s: create staging for %s:%s at %s", - self.project_id.name, r.name, self.name, - staging_head - ) - refname = 'staging.{}'.format(self.name) - it['gh'].set_ref(refname, staging_head) - # asserts that the new 
head is visible through the api - head = it['gh'].head(refname) - assert head == staging_head,\ - "[api] updated %s:%s to %s but found %s" % ( - r.name, refname, - staging_head, head, - ) - - i = itertools.count() - @utils.backoff(delays=WAIT_FOR_VISIBILITY, exc=TimeoutError) - def wait_for_visibility(): - if self._check_visibility(r, refname, staging_head, token): - _logger.info( - "[repo] updated %s:%s to %s: ok (at %d/%d)", - r.name, refname, staging_head, - next(i), len(WAIT_FOR_VISIBILITY) - ) - return - _logger.warning( - "[repo] updated %s:%s to %s: failed (at %d/%d)", - r.name, refname, staging_head, - next(i), len(WAIT_FOR_VISIBILITY) - ) - raise TimeoutError("Staged head not updated after %d seconds" % sum(WAIT_FOR_VISIBILITY)) - - logger.info("Created staging %s (%s) to %s", st, ', '.join( - '%s[%s]' % (batch, batch.prs) - for batch in staged - ), st.target.name) - return st - - def _check_visibility(self, repo, branch_name, expected_head, token): - """ Checks the repository actual to see if the new / expected head is - now visible - """ - # v1 protocol provides URL for ref discovery: https://github.com/git/git/blob/6e0cc6776106079ed4efa0cc9abace4107657abf/Documentation/technical/http-protocol.txt#L187 - # for more complete client this is also the capabilities discovery and - # the "entry point" for the service - url = 'https://github.com/{}.git/info/refs?service=git-upload-pack'.format(repo.name) - with requests.get(url, stream=True, auth=(token, '')) as resp: - if not resp.ok: - return False - for head, ref in parse_refs_smart(resp.raw.read): - if ref != ('refs/heads/' + branch_name): - continue - return head == expected_head - return False ACL = collections.namedtuple('ACL', 'is_admin is_reviewer is_author') class PullRequests(models.Model): - _name = _description = 'runbot_merge.pull_requests' + _name = 'runbot_merge.pull_requests' + _description = "Pull Request" + _inherit = ['mail.thread'] _order = 'number desc' _rec_name = 'number' - target = fields.Many2one('runbot_merge.branch', required=True, index=True) + id: int + display_name: str + + target = fields.Many2one('runbot_merge.branch', required=True, index=True, tracking=True) + target_sequence = fields.Integer(related='target.sequence') repository = fields.Many2one('runbot_merge.repository', required=True) + project = fields.Many2one(related='repository.project_id') # NB: check that target & repo have same project & provide project related? + closed = fields.Boolean(default=False, tracking=True) + error = fields.Boolean(string="in error", default=False, tracking=True) + skipchecks = fields.Boolean(related='batch_id.skipchecks', inverse='_inverse_skipchecks') + cancel_staging = fields.Boolean(related='batch_id.cancel_staging') + merge_date = fields.Datetime( + related='batch_id.merge_date', + inverse=readonly, + readonly=True, + tracking=True, + store=True, + ) + state = fields.Selection([ ('opened', 'Opened'), ('closed', 'Closed'), @@ -517,46 +363,72 @@ class PullRequests(models.Model): # staged? 
('merged', 'Merged'), ('error', 'Error'), - ], default='opened', index=True) + ], + compute='_compute_state', + inverse=readonly, + readonly=True, + store=True, + index=True, + tracking=True, + column_type=enum(_name, 'state'), + ) number = fields.Integer(required=True, index=True, group_operator=None) - author = fields.Many2one('res.partner') - head = fields.Char(required=True) + author = fields.Many2one('res.partner', index=True) + head = fields.Char(required=True, tracking=True) label = fields.Char( - required=True, index=True, + required=True, index=True, tracking=True, help="Label of the source branch (owner:branchname), used for " "cross-repository branch-matching" ) + refname = fields.Char(compute='_compute_refname') message = fields.Text(required=True) - draft = fields.Boolean(default=False, required=True) - squash = fields.Boolean(default=False) + message_html = fields.Html(compute='_compute_message_html', sanitize=False) + draft = fields.Boolean( + default=False, required=True, tracking=True, + help="A draft PR can not be merged", + ) + squash = fields.Boolean(default=False, tracking=True) merge_method = fields.Selection([ ('merge', "merge directly, using the PR as merge commit message"), ('rebase-merge', "rebase and merge, using the PR as merge commit message"), ('rebase-ff', "rebase and fast-forward"), ('squash', "squash"), - ], default=False) + ], default=False, tracking=True, column_type=enum(_name, 'merge_method')) method_warned = fields.Boolean(default=False) - reviewed_by = fields.Many2one('res.partner') + reviewed_by = fields.Many2one('res.partner', index=True, tracking=True) delegates = fields.Many2many('res.partner', help="Delegate reviewers, not intrinsically reviewers but can review this PR") - priority = fields.Integer(default=2, index=True, group_operator=None) + priority = fields.Selection(related="batch_id.priority", inverse=readonly, readonly=True) - overrides = fields.Char(required=True, default='{}') - statuses = fields.Text( - compute='_compute_statuses', - help="Copy of the statuses from the HEAD commit, as a Python literal" - ) + overrides = fields.Char(required=True, default='{}', tracking=True) + statuses = fields.Text(help="Copy of the statuses from the HEAD commit, as a Python literal", default="{}") statuses_full = fields.Text( compute='_compute_statuses', - help="Compilation of the full status of the PR (commit statuses + overrides), as JSON" + help="Compilation of the full status of the PR (commit statuses + overrides), as JSON", + store=True, ) - status = fields.Char(compute='_compute_statuses') + status = fields.Selection([ + ('pending', 'Pending'), + ('failure', 'Failure'), + ('success', 'Success'), + ], compute='_compute_statuses', store=True, inverse=readonly, readonly=True, column_type=enum(_name, 'status')) previous_failure = fields.Char(default='{}') - batch_id = fields.Many2one('runbot_merge.batch', string="Active Batch", compute='_compute_active_batch', store=True) - batch_ids = fields.Many2many('runbot_merge.batch', string="Batches", context={'active_test': False}) - staging_id = fields.Many2one(related='batch_id.staging_id', store=True) + batch_id = fields.Many2one('runbot_merge.batch', index=True) + staging_id = fields.Many2one('runbot_merge.stagings', compute='_compute_staging', inverse=readonly, readonly=True, store=True) + staging_ids = fields.Many2many('runbot_merge.stagings', string="Stagings", compute='_compute_stagings', inverse=readonly, readonly=True, context={"active_test": False}) + + 
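+    # staging_id: the staging this PR is currently part of (if any);
+    # staging_ids: every staging which has ever included this PR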
+    @api.depends('batch_id.batch_staging_ids.runbot_merge_stagings_id.active')
+    def _compute_staging(self):
+        for p in self:
+            p.staging_id = p.batch_id.staging_ids.filtered('active')
+
+    @api.depends('batch_id.batch_staging_ids.runbot_merge_stagings_id')
+    def _compute_stagings(self):
+        for p in self:
+            p.staging_ids = p.batch_id.staging_ids
+
+    # e.g. '{"": "<integration head sha>", "<pr commit sha>": "<integrated sha>", ...}'
     commits_map = fields.Char(help="JSON-encoded mapping of PR commits to actually integrated commits. The integration head (either a merge commit or the PR's topmost) is mapped from the 'empty' pr commit (the key is an empty string, because you can't put a null key in json maps).", default='{}')
 
     link_warned = fields.Boolean(
@@ -565,7 +437,7 @@ class PullRequests(models.Model):
     )
 
     blocked = fields.Char(
-        compute='_compute_is_blocked',
+        compute='_compute_is_blocked', store=True,
         help="PR is not currently stageable for some reason (mostly an issue if status is ready)"
     )
 
@@ -575,16 +447,44 @@
     repo_name = fields.Char(related='repository.name')
     message_title = fields.Char(compute='_compute_message_title')
 
-    def ping(self, author=True, reviewer=True):
-        P = self.env['res.partner']
-        s = ' '.join(
-            f'@{p.github_login}'
-            for p in (self.author if author else P) | (self.reviewed_by if reviewer else P)
-            if p
-        )
-        if s:
-            s += ' '
-        return s
+    ping = fields.Char(compute='_compute_ping', recursive=True)
+
+    source_id = fields.Many2one('runbot_merge.pull_requests', index=True, help="the original source of this FP even if parents were detached along the way")
+    parent_id = fields.Many2one(
+        'runbot_merge.pull_requests', index=True,
+        help="a PR with a parent is an automatic forward port",
+        tracking=True,
+    )
+    root_id = fields.Many2one('runbot_merge.pull_requests', compute='_compute_root', recursive=True)
+    forwardport_ids = fields.One2many('runbot_merge.pull_requests', 'source_id')
+    limit_id = fields.Many2one('runbot_merge.branch', help="Up to which branch should this PR be forward-ported", tracking=True)
+
+    detach_reason = fields.Char()
+
+    _sql_constraints = [(
+        'fw_constraint',
+        'check(source_id is null or num_nonnulls(parent_id, detach_reason) = 1)',
+        "fw PRs must either be attached or have a reason for being detached",
+    )]
+
+    @api.depends('label')
+    def _compute_refname(self):
+        for pr in self:
+            pr.refname = pr.label.split(':', 1)[-1]
+
+    @api.depends(
+        'author.github_login', 'reviewed_by.github_login',
+        'source_id.author.github_login', 'source_id.reviewed_by.github_login',
+    )
+    def _compute_ping(self):
+        for pr in self:
+            if source := pr.source_id:
+                contacts = source.author | source.reviewed_by | pr.reviewed_by
+            else:
+                contacts = pr.author | pr.reviewed_by
+
+            s = ' '.join(f'@{p.github_login}' for p in contacts)
+            pr.ping = s and (s + ' ')
 
     @api.depends('repository.name', 'number')
     def _compute_url(self):
@@ -595,20 +495,45 @@
             pr.url = str(base.join(path))
             pr.github_url = str(gh_base.join(path))
 
+    @api.depends('parent_id.root_id')
+    def _compute_root(self):
+        for p in self:
+            p.root_id = reduce(lambda _, p: p, p._iter_ancestors())
+
     @api.depends('message')
     def _compute_message_title(self):
         for pr in self:
             pr.message_title = next(iter(pr.message.splitlines()), '')
 
+    @api.depends("message")
+    def _compute_message_html(self):
+        for pr in self:
+            match pr.message.split('\n\n', 1):
+                case [title]:
+                    pr.message_html = Markup('<h3>%s</h3>') % title
+                case [title, description]:
+                    pr.message_html = Markup('<h3>%s</h3>\n%s') % (
+                        title,
+                        dfm(pr.repository.name, description),
+                    )
+                case _:
+                    pr.message_html = ""
+
     @api.depends('repository.name', 'number', 'message')
     def _compute_display_name(self):
-        return super(PullRequests, self)._compute_display_name()
-
-    def name_get(self):
         name_template = '%(repo_name)s#%(number)d'
         if self.env.context.get('pr_include_title'):
             name_template += ' (%(message_title)s)'
-        return [(p.id, name_template % p) for p in self]
+
+        for p in self:
+            p.display_name = name_template % p
+
+    def _inverse_skipchecks(self):
+        for p in self:
+            p.batch_id.skipchecks = p.skipchecks
+            if p.skipchecks:
+                p.reviewed_by = self.env.user.partner_id
+
     @api.model
     def name_search(self, name='', args=None, operator='ilike', limit=100):
@@ -625,14 +550,11 @@
         domain = expression.OR(bits)
         if args:
             domain = expression.AND([args, domain])
-        return self.search(domain, limit=limit).sudo().name_get()
+        return self.search(domain, limit=limit).sudo().mapped(lambda r: (r.id, r.display_name))
 
     @property
     def _approved(self):
-        return self.state in ('approved', 'ready') or any(
-            p.priority == 0
-            for p in (self | self._linked_prs)
-        )
+        return self.state in ('approved', 'ready')
 
     @property
     def _ready(self):
@@ -640,101 +562,68 @@
     @property
     def _linked_prs(self):
-        if re.search(r':patch-\d+', self.label):
-            return self.browse(())
-        if self.state == 'merged':
-            return self.with_context(active_test=False).batch_ids\
-                .filtered(lambda b: b.staging_id.state == 'success')\
-                .prs - self
-        return self.search([
-            ('target', '=', self.target.id),
-            ('label', '=', self.label),
-            ('state', 'not in', ('merged', 'closed')),
-        ]) - self
+        return self.batch_id.prs - self
 
-    # missing link to other PRs
-    @api.depends('priority', 'state', 'squash', 'merge_method', 'batch_id.active', 'label')
+    @property
+    def limit_pretty(self):
+        if self.limit_id:
+            return self.limit_id.name
+
+        branches = self.repository.project_id.branch_ids
+        if ((bf := self.repository.branch_filter) or '[]') != '[]':
+            branches = branches.filtered_domain(ast.literal_eval(bf))
+        return branches[:1].name
+
+    @api.depends(
+        'batch_id.prs.draft',
+        'batch_id.prs.squash',
+        'batch_id.prs.merge_method',
+        'batch_id.prs.state',
+        'batch_id.skipchecks',
+    )
     def _compute_is_blocked(self):
         self.blocked = False
+        requirements = (
+            lambda p: not p.draft,
+            lambda p: p.squash or p.merge_method,
+            lambda p: p.state == 'ready' \
+                   or p.batch_id.skipchecks \
+                  and all(pr.state != 'error' for pr in p.batch_id.prs)
+        )
+        messages = ('is in draft', 'has no merge method', 'is not ready')
         for pr in self:
             if pr.state in ('merged', 'closed'):
                 continue
 
-            linked = pr._linked_prs
-            # check if PRs are configured (single commit or merge method set)
-            if not (pr.squash or pr.merge_method):
-                pr.blocked = 'has no merge method'
-                continue
-            other_unset = next((p for p in linked if not (p.squash or p.merge_method)), None)
-            if other_unset:
-                pr.blocked = "linked PR %s has no merge method" % other_unset.display_name
-                continue
+            blocking, message = next((
+                (blocking, message)
+                for blocking in pr.batch_id.prs
+                for requirement, message in zip(requirements, messages)
+                if not requirement(blocking)
+            ), (None, None))
+            if blocking == pr:
+                pr.blocked = message
+            elif blocking:
+                pr.blocked = f"linked PR {blocking.display_name} {message}"
 
-            # check if any PR in the batch is p=0 and none is in error
-            if any(p.priority == 0 for p in (pr | linked)):
-                if pr.state == 'error':
-                    pr.blocked = "in error"
-                other_error = next((p for p in linked if p.state == 'error'), None)
-                if other_error:
- pr.blocked = "linked pr %s in error" % other_error.display_name - # if none is in error then none is blocked because p=0 - # "unblocks" the entire batch - continue - - if pr.state != 'ready': - pr.blocked = 'not ready' - continue - - unready = next((p for p in linked if p.state != 'ready'), None) - if unready: - pr.blocked = 'linked pr %s is not ready' % unready.display_name - continue - - def _get_overrides(self): + def _get_overrides(self) -> dict[str, dict[str, str]]: + if self.parent_id: + return self.parent_id._get_overrides() | json.loads(self.overrides) if self: return json.loads(self.overrides) return {} - @api.depends('head', 'repository.status_ids', 'overrides') - def _compute_statuses(self): - Commits = self.env['runbot_merge.commit'] - for pr in self: - c = Commits.search([('sha', '=', pr.head)]) - st = json.loads(c.statuses or '{}') - statuses = {**st, **pr._get_overrides()} - pr.statuses_full = json.dumps(statuses) - if not statuses: - pr.status = pr.statuses = False - continue - - pr.statuses = pprint.pformat(st) - - st = 'success' - for ci in pr.repository.status_ids._for_pr(pr): - v = state_(statuses, ci.context) or 'pending' - if v in ('error', 'failure'): - st = 'failure' - break - if v == 'pending': - st = 'pending' - pr.status = st - - @api.depends('batch_ids.active') - def _compute_active_batch(self): - for r in self: - r.batch_id = r.batch_ids.filtered(lambda b: b.active)[:1] - - def _get_or_schedule(self, repo_name, number, target=None): + def _get_or_schedule(self, repo_name, number, *, target=None, closing=False): repo = self.env['runbot_merge.repository'].search([('name', '=', repo_name)]) if not repo: return if target and not repo.project_id._has_branch(target): - self.env['runbot_merge.pull_requests.feedback'].create({ - 'repository': repo.id, - 'pull_request': number, - 'message': "I'm sorry. Branch `{}` is not within my remit.".format(target), - }) + self.env.ref('runbot_merge.pr.fetch.unmanaged')._send( + repository=repo, + pull_request=number, + format_args={'repository': repo, 'branch': target, 'number': number} + ) return pr = self.search([ @@ -750,243 +639,481 @@ class PullRequests(models.Model): Fetch.create({ 'repository': repo.id, 'number': number, + 'closing': closing, }) - def _parse_command(self, commandstring): - for m in re.finditer( - r'(\S+?)(?:([+-])|=(\S*))?(?=\s|$)', - commandstring, - ): - name, flag, param = m.groups() - if name == 'r': - name = 'review' - if flag in ('+', '-'): - yield name, flag == '+' - elif name == 'delegate': - if param: - for p in param.split(','): - yield 'delegate', p.lstrip('#@') - elif name == 'override': - if param: - for p in param.split(','): - yield 'override', p - elif name in ('p', 'priority'): - if param in ('0', '1', '2'): - yield ('priority', int(param)) - elif any(name == k for k, _ in type(self).merge_method.selection): - yield ('method', name) - else: - yield name, param + def _iter_ancestors(self) -> Iterator[PullRequests]: + while self: + yield self + self = self.parent_id + + def _iter_descendants(self) -> Iterator[PullRequests]: + pr = self + while pr := self.search([('parent_id', '=', pr.id)]): + yield pr def _parse_commands(self, author, comment, login): - """Parses a command string prefixed by Project::github_prefix. 
- - A command string can contain any number of space-separated commands: - - retry - resets a PR in error mode to ready for staging - r(eview)+/- - approves or disapproves a PR (disapproving just cancels an approval) - delegate+/delegate=<users> - adds either PR author or the specified (github) users as - authorised reviewers for this PR. ``<users>`` is a - comma-separated list of github usernames (no @) - p(riority)=2|1|0 - sets the priority to normal (2), pressing (1) or urgent (0). - Lower-priority PRs are selected first and batched together. - rebase+/- - Whether the PR should be rebased-and-merged (the default) or just - merged normally. - """ assert self, "parsing commands must be executed in an actual PR" (login, name) = (author.github_login, author.display_name) if author else (login, 'not in system') - is_admin, is_reviewer, is_author = self._pr_acl(author) - - commands = [ - ps - for m in self.repository.project_id._find_commands(comment['body'] or '') - for ps in self._parse_command(m) - ] - - if not commands: - _logger.info("found no commands in comment of %s (%s) (%s)", author.github_login, author.display_name, + commandlines = self.repository.project_id._find_commands(comment['body'] or '') + if not commandlines: + _logger.info("found no commands in comment of %s (%s) (%s)", login, name, utils.shorten(comment['body'] or '', 50) ) return 'ok' - Feedback = self.env['runbot_merge.pull_requests.feedback'] - if not (is_author or any(cmd == 'override' for cmd, _ in commands)): + def feedback(message: Optional[str] = None, close: bool = False): + self.env['runbot_merge.pull_requests.feedback'].create({ + 'repository': self.repository.id, + 'pull_request': self.number, + 'message': message, + 'close': close, + }) + + is_admin, is_reviewer, is_author = self._pr_acl(author) + _source_admin, source_reviewer, source_author = self.source_id._pr_acl(author) + # nota: 15.0 `has_group` completely doesn't work if the recordset is empty + super_admin = is_admin and author.user_ids and author.user_ids.has_group('runbot_merge.group_admin') + + help_list: list[type(commands.Command)] = list(filter(None, [ + commands.Help, + + (self.source_id and (source_author or source_reviewer) or is_reviewer) and not self.reviewed_by and commands.Approve, + (is_author or source_author) and self.reviewed_by and commands.Reject, + (is_author or source_author) and self.error and commands.Retry, + + is_author and not self.source_id and commands.FW, + is_author and commands.Limit, + source_author and self.source_id and commands.Close, + + is_reviewer and commands.MergeMethod, + is_reviewer and commands.Delegate, + + is_admin and commands.Priority, + super_admin and commands.SkipChecks, + is_admin and commands.CancelStaging, + + author.override_rights and commands.Override, + is_author and commands.Check, + ])) + def format_help(warn_ignore: bool, address: bool = True) -> str: + s = [ + 'Currently available commands{}:'.format( + f" for @{login}" if address else "" + ), + '', + '|command||', + '|-|-|', + ] + for command_type in help_list: + for cmd, text in command_type.help(is_reviewer): + s.append(f"|`{cmd}`|{text}|") + + s.extend(['', 'Note: this help text is dynamic and will change with the state of the PR.']) + if warn_ignore: + s.extend(["", "Warning: in invoking help, every other command has been ignored."]) + return "\n".join(s) + + try: + cmds: List[commands.Command] = [ + ps + for line in commandlines + for ps in commands.Parser(line.rstrip()) + ] + except Exception as e: + _logger.info( + "error %s while 
parsing comment of %s (%s): %s",
+            e,
+            login, name,
+            utils.shorten(comment['body'] or '', 50),
+        )
+        feedback(message=f"""@{login} {e.args[0]}.

+For your own safety I've ignored *everything in your entire comment*.

+{format_help(False, address=False)}
+""")
+        return 'error'
+
+    if any(isinstance(cmd, commands.Help) for cmd in cmds):
+        self.env['runbot_merge.pull_requests.feedback'].create({
+            'repository': self.repository.id,
+            'pull_request': self.number,
+            'message': format_help(len(cmds) != 1),
+        })
+        return "help"
+
+    if not (is_author or self.source_id or (any(isinstance(cmd, commands.Override) for cmd in cmds) and author.override_rights)):
         # no point even parsing commands
         _logger.info("ignoring comment of %s (%s): no ACL to %s", login, name, self.display_name)
-        Feedback.create({
-            'repository': self.repository.id,
-            'pull_request': self.number,
-            'message': "I'm sorry, @{}. I'm afraid I can't do that.".format(login)
-        })
+        self.env.ref('runbot_merge.command.access.no')._send(
+            repository=self.repository,
+            pull_request=self.number,
+            format_args={'user': login, 'pr': self}
+        )
         return 'ignored'
 
-    applied, ignored = [], []
-    def reformat(command, param):
-        if param is None:
-            pstr = ''
-        elif isinstance(param, bool):
-            pstr = '+' if param else '-'
-        elif isinstance(param, list):
-            pstr = '=' + ','.join(param)
-        else:
-            pstr = '={}'.format(param)
-
-        return '%s%s' % (command, pstr)
-    msgs = []
-    for command, param in commands:
-        ok = False
+    rejections = []
+    for command in cmds:
         msg = None
-        if command == 'retry':
-            if is_author:
-                if self.state == 'error':
-                    ok = True
-                    self.state = 'ready'
+        match command:
+            case commands.Approve() if self.draft:
+                msg = "draft PRs can not be approved."
+            case commands.Approve() if self.source_id:
+                # rules are a touch different for forwardport PRs:
+                valid = (lambda _: True) if command.ids is None else (lambda n: n in command.ids)
+                _, source_reviewer, source_author = self.source_id._pr_acl(author)
+
+                ancestors = list(self._iter_ancestors())
+                # - reviewers on the original can approve any forward port
+                if source_reviewer:
+                    approveable = ancestors
+                elif source_author:
+                    # give full review rights on all forwardports (attached
+                    # or not) to original author
+                    approveable = ancestors
+                else:
+                    # between the first merged ancestor and self
+                    mergeors = list(itertools.dropwhile(
+                        lambda p: p.state != 'merged',
+                        reversed(ancestors),
+                    ))
+                    # between the first ancestor the current user can review and self
+                    reviewors = list(itertools.dropwhile(
+                        lambda p: not p._pr_acl(author).is_reviewer,
+                        reversed(ancestors),
+                    ))
+
+                    # source author can approve any descendant of a merged
+                    # forward port (or source), people with review rights
+                    # to a forward port have review rights to its
+                    # descendants, if both apply use the most favorable
+                    # (largest number of PRs)
+                    if source_author and len(mergeors) > len(reviewors):
+                        approveable = mergeors
+                    else:
+                        approveable = reviewors
+
+                if approveable:
+                    for pr in approveable:
+                        if not (pr.state in RPLUS and valid(pr.number)):
+                            continue
+                        msg = pr._approve(author, login)
+                        if msg:
+                            break
+                else:
+                    msg = f"you can't {command}, you silly little bean."
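+                # nb: non-forwardport case, the commenter must be a reviewer
+                # on this PR itself and may only name this PR's number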
+                case commands.Approve() if is_reviewer:
+                    if command.ids is not None and command.ids != [self.number]:
+                        msg = f"tried to approve PRs {command.ids} but the current PR is {self.number}"
+                    else:
+                        msg = self._approve(author, login)
+                case commands.Reject() if is_author or source_author:
+                    if self.batch_id.skipchecks or self.reviewed_by:
+                        if self.error:
+                            self.error = False
+                        if self.reviewed_by:
+                            self.reviewed_by = False
+                        if self.batch_id.skipchecks:
+                            self.batch_id.skipchecks = False
+                        self.env.ref("runbot_merge.command.unapprove.p0")._send(
+                            repository=self.repository,
+                            pull_request=self.number,
+                            format_args={'user': login, 'pr': self},
+                        )
+                        if self.source_id:
+                            feedback("Note that only this forward-port has been"
+                                     " unapproved, sibling forward ports may "
+                                     "have to be unapproved individually.")
+                        self.unstage("unreviewed (r-) by %s", login)
+                    else:
+                        msg = "r- makes no sense in the current PR state."
+                case commands.MergeMethod() if is_reviewer:
+                    self.merge_method = command.value
+                    explanation = next(label for value, label in type(self).merge_method.selection if value == command.value)
+                    self.env.ref("runbot_merge.command.method")._send(
+                        repository=self.repository,
+                        pull_request=self.number,
+                        format_args={'new_method': explanation, 'pr': self, 'user': login},
+                    )
+                case commands.Retry() if is_author or source_author:
+                    if self.error:
+                        self.error = False
                     else:
                         msg = "retry makes no sense when the PR is not in error."
-            elif command == 'check':
-                if is_author:
+                case commands.Check() if is_author:
                     self.env['runbot_merge.fetch_job'].create({
                         'repository': self.repository.id,
                         'number': self.number,
                     })
-                    ok = True
-            elif command == 'review':
-                if self.draft:
-                    msg = "draft PRs can not be approved."
-                elif param and is_reviewer:
-                    oldstate = self.state
-                    newstate = RPLUS.get(self.state)
-                    if not author.email:
-                        msg = "I must know your email before you can review PRs. Please contact an administrator."
-                    elif not newstate:
-                        msg = "this PR is already reviewed, reviewing it again is useless."
+                case commands.Delegate(users) if is_reviewer:
+                    if not users:
+                        delegates = self.author
                     else:
-                        self.state = newstate
-                        self.reviewed_by = author
-                        ok = True
-                        _logger.debug(
-                            "r+ on %s by %s (%s->%s) status=%s message? %s",
-                            self.display_name, author.github_login,
-                            oldstate, newstate or oldstate,
-                            self.status, self.status == 'failure'
-                        )
-                        if self.status == 'failure':
-                            # the normal infrastructure is for failure and
-                            # prefixes messages with "I'm sorry"
-                            Feedback.create({
-                                'repository': self.repository.id,
-                                'pull_request': self.number,
-                                'message': "@{} you may want to rebuild or fix this PR as it has failed CI.".format(login),
-                            })
-                elif not param and is_author:
-                    newstate = RMINUS.get(self.state)
-                    if self.priority == 0 or newstate:
-                        if newstate:
-                            self.state = newstate
-                        if self.priority == 0:
-                            self.priority = 1
-                            Feedback.create({
-                                'repository': self.repository.id,
-                                'pull_request': self.number,
-                                'message': "PR priority reset to 1, as pull requests with priority 0 ignore review state.",
+                        delegates = self.env['res.partner']
+                        # use a distinct loop variable so the commenter's
+                        # `login` is not clobbered by the delegate logins
+                        for delegate_login in users:
+                            delegates |= delegates.search([('github_login', '=', delegate_login)]) or delegates.create({
+                                'name': delegate_login,
+                                'github_login': delegate_login,
                             })
-                    self.unstage("unreviewed (r-) by %s", login)
-                    ok = True
-                else:
-                    msg = "r- makes no sense in the current PR state."
- elif command == 'delegate': - if is_reviewer: - ok = True - Partners = self.env['res.partner'] - if param is True: - delegate = self.author - else: - delegate = Partners.search([('github_login', '=', param)]) or Partners.create({ - 'name': param, - 'github_login': param, - }) - delegate.write({'delegate_reviewer': [(4, self.id, 0)]}) - elif command == 'priority': - if is_admin: - ok = True - self.priority = param - if param == 0: + delegates.write({'delegate_reviewer': [(4, self.id, 0)]}) + case commands.Priority() if is_admin: + self.batch_id.priority = str(command) + case commands.SkipChecks() if super_admin: + self.batch_id.skipchecks = True + self.reviewed_by = author + if not (self.squash or self.merge_method): + self.env.ref('runbot_merge.check_linked_prs_status')._trigger() + + for p in self.batch_id.prs - self: + if not p.reviewed_by: + p.reviewed_by = author + case commands.CancelStaging() if is_admin: + self.batch_id.cancel_staging = True + if not self.batch_id.blocked: + if splits := self.target.split_ids: + splits.unlink() self.target.active_staging_id.cancel( - "P=0 on %s by %s, unstaging target %s", - self.display_name, - author.github_login, self.target.name, + "Unstaged by %s on %s", + author.github_login, self.display_name, ) - elif command == 'method': - if is_reviewer: - self.merge_method = param - ok = True - explanation = next(label for value, label in type(self).merge_method.selection if value == param) - Feedback.create({ - 'repository': self.repository.id, - 'pull_request': self.number, - 'message':"Merge method set to %s." % explanation - }) - elif command == 'override': - overridable = author.override_rights\ - .filtered(lambda r: not r.repository_id or (r.repository_id == self.repository))\ - .mapped('context') - if param in overridable: - self.overrides = json.dumps({ - **json.loads(self.overrides), - param: { - 'state': 'success', - 'target_url': comment['html_url'], - 'description': f"Overridden by @{author.github_login}", - }, - }) - c = self.env['runbot_merge.commit'].search([('sha', '=', self.head)]) - if c: - c.to_check = True - else: - c.create({'sha': self.head, 'statuses': '{}'}) - ok = True - else: - msg = "you are not allowed to override this status." - else: - # ignore unknown commands - continue + case commands.Override(statuses): + for status in statuses: + overridable = author.override_rights\ + .filtered(lambda r: not r.repository_id or (r.repository_id == self.repository))\ + .mapped('context') + if status in overridable: + self.overrides = json.dumps({ + **json.loads(self.overrides), + status: { + 'state': 'success', + 'target_url': comment['html_url'], + 'description': f"Overridden by @{author.github_login}", + }, + }) + c = self.env['runbot_merge.commit'].search([('sha', '=', self.head)]) + if c: + c.to_check = True + else: + c.create({'sha': self.head, 'statuses': '{}'}) + else: + msg = f"you are not allowed to override {status!r}." + # FW + case commands.Close() if source_author: + feedback(close=True) + case commands.FW(): + match command: + case commands.FW.NO if is_author or source_author: + message = "Disabled forward-porting." + case commands.FW.DEFAULT if is_author or source_author: + message = "Waiting for CI to create followup forward-ports." + case commands.FW.SKIPCI if is_reviewer or source_reviewer: + message = "Not waiting for CI to create followup forward-ports." + case commands.FW.SKIPMERGE if is_reviewer or source_reviewer: + message = "Not waiting for merge to create followup forward-ports." 
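+                        # nb: any other author/reviewer combination lacks
+                        # the rights for the requested fw policy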
+ case _: + msg = f"you don't have the right to {command}." - _logger.info( - "%s %s(%s) on %s by %s (%s)", - "applied" if ok else "ignored", - command, param, self.display_name, - author.github_login, author.display_name, - ) - if ok: - applied.append(reformat(command, param)) - else: - ignored.append(reformat(command, param)) - msgs.append(msg or "you can't {}.".format(reformat(command, param))) + if not msg: + (self.source_id or self).batch_id.fw_policy = command.name.lower() + feedback(message=message) + case commands.Limit(branch) if is_author: + if branch is None: + feedback(message="'ignore' is deprecated, use 'fw=no' to disable forward porting.") + limit = branch or self.target.name + for p in self.batch_id.prs: + ping, m = p._maybe_update_limit(limit) - if msgs: - joiner = ' ' if len(msgs) == 1 else '\n- ' - msgs.insert(0, "I'm sorry, @{}:".format(login)) - Feedback.create({ + if ping and p == self: + msg = m + else: + if ping: + m = f"@{login} {m}" + self.env['runbot_merge.pull_requests.feedback'].create({ + 'repository': p.repository.id, + 'pull_request': p.number, + 'message': m, + }) + case commands.Limit(): + msg = "you can't set a forward-port limit." + # NO! + case _: + msg = f"you can't {command}." + if msg is not None: + rejections.append(msg) + + cmdstr = ', '.join(map(str, cmds)) + if not rejections: + _logger.info("%s (%s) applied %s", login, name, cmdstr) + self._track_set_author(author, fallback=True) + return 'applied ' + cmdstr + + self.env.cr.rollback() + rejections_list = ''.join(f'\n- {r}' for r in rejections) + _logger.info("%s (%s) tried to apply %s%s", login, name, cmdstr, rejections_list) + footer = '' if len(cmds) == len(rejections) else "\n\nFor your own safety I've ignored everything in your comment." + if rejections_list: + rejections = ' ' + rejections_list.removeprefix("\n- ") if rejections_list.count('\n- ') == 1 else rejections_list + feedback(message=f"@{login}{rejections}{footer}") + return 'rejected' + + def _maybe_update_limit(self, limit: str) -> Tuple[bool, str]: + limit_id = self.env['runbot_merge.branch'].with_context(active_test=False).search([ + ('project_id', '=', self.repository.project_id.id), + ('name', '=', limit), + ]) + if not limit_id: + return True, f"there is no branch {limit!r}, it can't be used as a forward port target." + + if limit_id != self.target and not limit_id.active: + return True, f"branch {limit_id.name!r} is disabled, it can't be used as a forward port target." + + # not forward ported yet, just acknowledge the request + if not self.source_id and self.state != 'merged': + self.limit_id = limit_id + if branch_key(limit_id) <= branch_key(self.target): + return False, "Forward-port disabled (via limit)." + else: + return False, f"Forward-porting to {limit_id.name!r}." 
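+
+        # nb: branch_key/pr_key wrap (sequence, name) in Reverse, so the
+        #     max() below picks the PR targeting the *last* link of the
+        #     forward-port chain, i.e. the chain's current tip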
+
+        # if the PR has been forwardported
+        prs = (self | self.forwardport_ids | self.source_id | self.source_id.forwardport_ids)
+        tip = max(prs, key=pr_key)
+        # if the fp tip was closed it's fine
+        if tip.state == 'closed':
+            return True, f"{tip.display_name} is closed, no forward porting is going on."
+
+        prs.limit_id = limit_id
+
+        real_limit = max(limit_id, tip.target, key=branch_key)
+
+        addendum = ''
+        # check if tip was queued for forward porting, try to cancel if we're
+        # supposed to stop here
+        if real_limit == tip.target and (task := self.env['forwardport.batches'].search([('batch_id', '=', tip.batch_id.id)])):
+            try:
+                with self.env.cr.savepoint():
+                    self.env.cr.execute(
+                        "SELECT FROM forwardport_batches "
+                        "WHERE id = %s FOR UPDATE NOWAIT",
+                        [task.id])
+            except psycopg2.errors.LockNotAvailable:
+                # row locked = port occurring and probably going to succeed,
+                # so the port to the next branch is likely a done deal already
+                return True, (
+                    f"Forward port of {tip.display_name} likely already "
+                    f"ongoing, unable to cancel, close the next forward port "
+                    f"when it completes.")
+            else:
+                self.env.cr.execute("DELETE FROM forwardport_batches WHERE id = %s", [task.id])
+
+        if real_limit != tip.target:
+            # forward porting was previously stopped at tip, and we want it to
+            # resume
+            if tip.state == 'merged':
+                self.env['forwardport.batches'].create({
+                    'batch_id': tip.batch_id.id,
+                    'source': 'fp' if tip.parent_id else 'merge',
+                })
+                resumed = tip
+            else:
+                resumed = tip.batch_id._schedule_fp_followup()
+            if resumed:
+                addendum += f', resuming forward-port stopped at {tip.display_name}'
+
+        if real_limit != limit_id:
+            addendum += f' (instead of the requested {limit_id.name!r} because {tip.display_name} already exists)'
+
+        # get a "stable" root rather than self's to avoid divergences between
+        # PRs across a root divide (where one post-root would point to the root,
+        # and one pre-root would point to the source, or a previous root)
+        root = tip.root_id
+        # reference the root being forward ported unless we are the root
+        root_ref = '' if root == self else f' {root.display_name}'
+        msg = f"Forward-porting{root_ref} to {real_limit.name!r}{addendum}."
+        # send a message to the source & root except for self, if they exist
+        root_msg = f'Forward-porting to {real_limit.name!r} (from {self.display_name}).'
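+        # nb: sent with the project's `fp_github_token` rather than the
+        #     default token, as for other forward-port notifications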
+ self.env['runbot_merge.pull_requests.feedback'].create([ + { + 'repository': p.repository.id, + 'pull_request': p.number, + 'message': root_msg, + 'token_field': 'fp_github_token', + } + # send messages to source and root unless root is self (as it + # already gets the normal message) + for p in (self.source_id | root) - self + ]) + + return False, msg + + + def _find_next_target(self) -> Optional[Branch]: + """ Finds the branch between target and limit_id which follows + reference + """ + root = (self.source_id or self) + if self.target == root.limit_id: + return None + + branches = root.target.project_id.with_context(active_test=False)._forward_port_ordered() + if (branch_filter := self.repository.branch_filter) and branch_filter != '[]': + branches = branches.filtered_domain(ast.literal_eval(branch_filter)) + + branches = list(branches) + from_ = branches.index(self.target) + 1 + to_ = branches.index(root.limit_id) + 1 if root.limit_id else None + + # return the first active branch in the set + return next(( + branch + for branch in branches[from_:to_] + if branch.active + ), None) + + + def _approve(self, author, login): + oldstate = self.state + newstate = RPLUS.get(oldstate) + if not author.email: + return "I must know your email before you can review PRs. Please contact an administrator." + + if not newstate: + # Don't fail the entire command if someone tries to approve an + # already-approved PR. + if self.error: + msg = "This PR is already reviewed, it's in error, you might want to `retry` it instead " \ + "(if you have already confirmed the error is not legitimate)." + else: + msg = "This PR is already reviewed, reviewing it again is useless." + self.env['runbot_merge.pull_requests.feedback'].create({ 'repository': self.repository.id, 'pull_request': self.number, - 'message': joiner.join(msgs), + 'message': msg, }) + return None - msg = [] - if applied: - msg.append('applied ' + ' '.join(applied)) - if ignored: - ignoredstr = ' '.join(ignored) - msg.append('ignored ' + ignoredstr) - return '\n'.join(msg) + self.reviewed_by = author + _logger.debug( + "r+ on %s by %s (%s->%s) status=%s message? %s", + self.display_name, author.github_login, + oldstate, newstate, + self.status, self.status == 'failure' + ) + if self.status == 'failure': + # the normal infrastructure is for failure and + # prefixes messages with "I'm sorry" + self.env.ref("runbot_merge.command.approve.failure")._send( + repository=self.repository, + pull_request=self.number, + format_args={'user': login, 'pr': self}, + ) + if not (self.squash or self.merge_method): + self.env.ref('runbot_merge.check_linked_prs_status')._trigger() + return None def _pr_acl(self, user): if not self: @@ -1006,33 +1133,76 @@ class PullRequests(models.Model): # could have two PRs (e.g. 
one open and one closed) at least
         # temporarily on the same head, or on the same head with different
         # targets
-        failed = self.browse(())
+        updateable = self.filtered(lambda p: not p.merge_date)
+        updateable.statuses = statuses
+        for pr in updateable:
+            if pr.status == "failure":
+                statuses = json.loads(pr.statuses_full)
+                for ci in pr.repository.status_ids._for_pr(pr).mapped('context'):
+                    status = statuses.get(ci) or {'state': 'pending'}
+                    if status['state'] in ('error', 'failure'):
+                        pr._notify_ci_new_failure(ci, status)
+        self.batch_id._schedule_fp_followup()
+
+    def modified(self, fnames, create=False, before=False):
+        """ By default, Odoo can't express recursive *dependencies*, which is
+        exactly what we need for statuses: they depend on the current PR's
+        overrides, and the parent's overrides, and *its* parent's overrides, ...
+
+        One option would be to create a stored computed field which accumulates
+        the overrides as *fields* can be recursive, but...
+        """
+        if 'overrides' in fnames:
+            descendants_or_self = self.concat(*self._iter_descendants())
+            self.env.add_to_compute(self._fields['status'], descendants_or_self)
+            self.env.add_to_compute(self._fields['statuses_full'], descendants_or_self)
+            self.env.add_to_compute(self._fields['state'], descendants_or_self)
+        super().modified(fnames, create, before)
+
+    @api.depends(
+        'statuses', 'overrides', 'target', 'parent_id',
+        'repository.status_ids.context',
+        'repository.status_ids.branch_filter',
+        'repository.status_ids.prs',
+    )
+    def _compute_statuses(self):
         for pr in self:
-            required = pr.repository.status_ids._for_pr(pr).mapped('context')
-            sts = {**statuses, **pr._get_overrides()}
+            statuses = {**json.loads(pr.statuses), **pr._get_overrides()}
 
-            success = True
-            for ci in required:
-                st = state_(sts, ci) or 'pending'
-                if st == 'success':
-                    continue
+            pr.statuses_full = json.dumps(statuses, indent=4)
+
+            st = 'success'
+            for ci in pr.repository.status_ids._for_pr(pr):
+                v = (statuses.get(ci.context) or {'state': 'pending'})['state']
+                if v in ('error', 'failure'):
+                    st = 'failure'
+                    break
+                if v == 'pending':
+                    st = 'pending'
+            pr.status = st
+
+    @api.depends(
+        "status", "reviewed_by", "closed", "error",
+        "batch_id.merge_date",
+        "batch_id.skipchecks",
+    )
+    def _compute_state(self):
+        for pr in self:
+            if pr.batch_id.merge_date:
+                pr.state = 'merged'
+            elif pr.closed:
+                pr.state = "closed"
+            elif pr.error:
+                pr.state = "error"
+            elif pr.batch_id.skipchecks:  # skipchecks behaves as both approval and status override
+                pr.state = "ready"
+            else:
+                states = ("opened", "approved", "validated", "ready")
+                pr.state = states[bool(pr.reviewed_by) | ((pr.status == "success") << 1)]
 
-                success = False
-                if st in ('error', 'failure'):
-                    failed |= pr
-                    pr._notify_ci_new_failure(ci, to_status(sts.get(ci.strip(), 'pending')))
-            if success:
-                oldstate = pr.state
-                if oldstate == 'opened':
-                    pr.state = 'validated'
-                elif oldstate == 'approved':
-                    pr.state = 'ready'
-        return failed
 
     def _notify_ci_new_failure(self, ci, st):
         prev = json.loads(self.previous_failure)
-        if prev.get('state'):  # old-style previous-failure
-            prev = {ci: prev}
         if not any(self._statuses_equivalent(st, v) for v in prev.values()):
             prev[ci] = st
             self.previous_failure = json.dumps(prev)
@@ -1069,14 +1239,34 @@ class PullRequests(models.Model):
     def _notify_ci_failed(self, ci):
         # only report an issue if the PR is already approved (r+'d)
         if self.state == 'approved':
-            self.env['runbot_merge.pull_requests.feedback'].create({
-                'repository': self.repository.id,
-                'pull_request': 
self.number, - 'message': "%s%r failed on this reviewed PR." % (self.ping(), ci), - }) + self.env.ref("runbot_merge.failure.approved")._send( + repository=self.repository, + pull_request=self.number, + format_args={'pr': self, 'status': ci} + ) + elif self.state == 'opened' and self.parent_id: + # only care about FP PRs which are not approved / staged / merged yet + self.env.ref('runbot_merge.forwardport.ci.failed')._send( + repository=self.repository, + pull_request=self.number, + token_field='fp_github_token', + format_args={'pr': self, 'ci': ci}, + ) def _auto_init(self): - super(PullRequests, self)._auto_init() + for field in self._fields.values(): + if not isinstance(field, fields.Selection) or field.column_type[0] == 'varchar': + continue + + t = field.column_type[1] + self.env.cr.execute("SELECT 1 FROM pg_type WHERE typname = %s", [t]) + if not self.env.cr.rowcount: + self.env.cr.execute( + f"CREATE TYPE {t} AS ENUM %s", + [tuple(s for s, _ in field.selection)] + ) + + super()._auto_init() # incorrect index: unique(number, target, repository). tools.drop_index(self._cr, 'runbot_merge_unique_pr_per_target', self._table) # correct index: @@ -1092,19 +1282,48 @@ class PullRequests(models.Model): return 'staged' return self.state - @api.model - def create(self, vals): - pr = super().create(vals) - c = self.env['runbot_merge.commit'].search([('sha', '=', pr.head)]) - pr._validate(json.loads(c.statuses or '{}')) + def _get_batch(self, *, target, label): + batch = self.env['runbot_merge.batch'] + if not re.search(r':patch-\d+$', label): + batch = batch.search([ + ('merge_date', '=', False), + ('prs.target', '=', target), + ('prs.label', '=', label), + ]) + return batch or batch.create({}) - if pr.state not in ('closed', 'merged'): - self.env['runbot_merge.pull_requests.feedback'].create({ - 'repository': pr.repository.id, - 'pull_request': pr.number, - 'message': f"[Pull request status dashboard]({pr.url}).", - }) - return pr + @api.model_create_multi + def create(self, vals_list): + for vals in vals_list: + batch = self._get_batch(target=vals['target'], label=vals['label']) + vals['batch_id'] = batch.id + if 'limit_id' not in vals: + limits = {p.limit_id for p in batch.prs} + if len(limits) == 1: + vals['limit_id'] = limits.pop().id + elif limits: + repo = self.env['runbot_merge.repository'].browse(vals['repository']) + _logger.warning( + "Unable to set limit on %s#%s: found multiple limits in batch (%s)", + repo.name, vals['number'], + ', '.join( + f'{p.display_name} => {p.limit_id.name}' + for p in batch.prs + ) + ) + + prs = super().create(vals_list) + for pr in prs: + c = self.env['runbot_merge.commit'].search([('sha', '=', pr.head)]) + pr._validate(c.statuses or '{}') + + if pr.state not in ('closed', 'merged'): + self.env.ref('runbot_merge.pr.created')._send( + repository=pr.repository, + pull_request=pr.number, + format_args={'pr': pr}, + ) + return prs def _from_gh(self, description, author=None, branch=None, repo=None): if repo is None: @@ -1122,7 +1341,7 @@ class PullRequests(models.Model): ], limit=1) return self.env['runbot_merge.pull_requests'].create({ - 'state': 'opened' if description['state'] == 'open' else 'closed', + 'closed': description['state'] != 'open', 'number': description['number'], 'label': repo._remap_label(description['head']['label']), 'author': author.id, @@ -1137,31 +1356,44 @@ class PullRequests(models.Model): def write(self, vals): if vals.get('squash'): vals['merge_method'] = False - prev = None - if 'target' in vals or 'message' in vals: - prev = { - 
pr.id: {'target': pr.target, 'message': pr.message} - for pr in self - } + # when explicitly marking a PR as ready + if vals.get('state') == 'ready': + # skip validation + self.batch_id.skipchecks = True + # mark current user as reviewer + vals.setdefault('reviewed_by', self.env.user.partner_id.id) + for p in self.batch_id.prs - self: + if not p.reviewed_by: + p.reviewed_by = self.env.user.partner_id.id + + for pr in self: + if (t := vals.get('target')) is not None and pr.target.id != t: + pr.unstage( + "target (base) branch was changed from %r to %r", + pr.target.display_name, + self.env['runbot_merge.branch'].browse(t).display_name, + ) + + if 'message' in vals: + merge_method = vals['merge_method'] if 'merge_method' in vals else pr.merge_method + if merge_method not in (False, 'rebase-ff') and pr.message != vals['message']: + pr.unstage("merge message updated") + + match vals.get('closed'): + case True if not self.closed: + vals['reviewed_by'] = False + case False if self.closed and not self.batch_id: + vals['batch_id'] = self._get_batch( + target=vals.get('target') or self.target.id, + label=vals.get('label') or self.label, + ) w = super().write(vals) newhead = vals.get('head') if newhead: c = self.env['runbot_merge.commit'].search([('sha', '=', newhead)]) - self._validate(json.loads(c.statuses or '{}')) - - if prev: - for pr in self: - old_target = prev[pr.id]['target'] - if pr.target != old_target: - pr.unstage( - "target (base) branch was changed from %r to %r", - old_target.display_name, pr.target.display_name, - ) - old_message = prev[pr.id]['message'] - if pr.merge_method not in (False, 'rebase-ff') and pr.message != old_message: - pr.unstage("merge message updated") + self._validate(c.statuses or '{}') return w def _check_linked_prs_statuses(self, commit=False): @@ -1193,8 +1425,6 @@ class PullRequests(models.Model): bool_or(pr.state = 'ready' AND NOT pr.link_warned) -- one of the others should be unready AND bool_or(pr.state != 'ready') - -- but ignore batches with one of the prs at p0 - AND bool_and(pr.priority != 0) """) for [ids] in self.env.cr.fetchall(): prs = self.browse(ids) @@ -1202,14 +1432,14 @@ class PullRequests(models.Model): unready = (prs - ready).sorted(key=lambda p: (p.repository.name, p.number)) for r in ready: - self.env['runbot_merge.pull_requests.feedback'].create({ - 'repository': r.repository.id, - 'pull_request': r.number, - 'message': "{}linked pull request(s) {} not ready. 
Linked PRs are not staged until all of them are ready.".format( - r.ping(), - ', '.join(map('{0.display_name}'.format, unready)) - ) - }) + self.env.ref('runbot_merge.pr.linked.not_ready')._send( + repository=r.repository, + pull_request=r.number, + format_args={ + 'pr': r, + 'siblings': ', '.join(map('{0.display_name}'.format, unready)) + }, + ) r.link_warned = True if commit: self.env.cr.commit() @@ -1222,291 +1452,199 @@ class PullRequests(models.Model): if pair[0] != 'squash' ) for r in self.search([ - ('state', '=', 'ready'), + ('state', 'in', ("approved", "ready")), + ('staging_id', '=', False), ('squash', '=', False), ('merge_method', '=', False), ('method_warned', '=', False), ]): - self.env['runbot_merge.pull_requests.feedback'].create({ - 'repository': r.repository.id, - 'pull_request': r.number, - 'message': "%sbecause this PR has multiple commits, I need to know how to merge it:\n\n%s" % ( - r.ping(), - methods, - ) - }) + self.env.ref('runbot_merge.pr.merge_method')._send( + repository=r.repository, + pull_request=r.number, + format_args={'pr': r, 'methods':methods}, + ) r.method_warned = True if commit: self.env.cr.commit() - def _parse_commit_message(self, message): - """ Parses a commit message to split out the pseudo-headers (which - should be at the end) from the body, and serialises back with a - predefined pseudo-headers ordering. - """ - return Message.from_message(message) - - def _is_mentioned(self, message, *, full_reference=False): - """Returns whether ``self`` is mentioned in ``message``` - - :param str | PullRequest message: - :param bool full_reference: whether the repository name must be present - :rtype: bool - """ - if full_reference: - pattern = fr'\b{re.escape(self.display_name)}\b' - else: - repository = self.repository.name # .replace('/', '\\/') - pattern = fr'( |\b{repository})#{self.number}\b' - return bool(re.search(pattern, message if isinstance(message, str) else message.message)) - - def _build_merge_message(self, message, related_prs=()): + def _build_message(self, message: Union['PullRequests', str], related_prs: 'PullRequests' = (), merge: bool = True) -> 'Message': # handle co-authored commits (https://help.github.com/articles/creating-a-commit-with-multiple-authors/) - m = self._parse_commit_message(message) - if not self._is_mentioned(message): - m.body += '\n\ncloses {pr.display_name}'.format(pr=self) - - for r in related_prs: - if not r._is_mentioned(message, full_reference=True): - m.headers.add('Related', r.display_name) - - if self.reviewed_by: - m.headers.add('signed-off-by', self.reviewed_by.formatted_email) - - return m - - def _add_self_references(self, commits): - """Adds a footer reference to ``self`` to all ``commits`` if they don't - already refer to the PR. 
- """ - for c in (c['commit'] for c in commits): - if not self._is_mentioned(c['message']): - m = self._parse_commit_message(c['message']) + m = Message.from_message(message) + if not is_mentioned(message, self): + if merge: + m.body += f'\n\ncloses {self.display_name}' + else: m.headers.pop('Part-Of', None) m.headers.add('Part-Of', self.display_name) - c['message'] = str(m) - def _stage(self, gh, target, related_prs=()): - # nb: pr_commits is oldest to newest so pr.head is pr_commits[-1] - _, prdict = gh.pr(self.number) - commits = prdict['commits'] - method = self.merge_method or ('rebase-ff' if commits == 1 else None) - if commits > 50 and method.startswith('rebase'): - raise exceptions.Unmergeable(self, "Rebasing 50 commits is too much.") - if commits > 250: - raise exceptions.Unmergeable( - self, "Merging PRs of 250 or more commits is not supported " - "(https://developer.github.com/v3/pulls/#list-commits-on-a-pull-request)" - ) - pr_commits = gh.commits(self.number) - for c in pr_commits: - if not (c['commit']['author']['email'] and c['commit']['committer']['email']): - raise exceptions.Unmergeable( - self, - f"All commits must have author and committer email, " - f"missing email on {c['sha']} indicates the authorship is " - f"most likely incorrect." - ) + for r in related_prs: + if not is_mentioned(message, r, full_reference=True): + m.headers.add('Related', r.display_name) - # sync and signal possibly missed updates - invalid = {} - diff = [] - pr_head = pr_commits[-1]['sha'] - if self.head != pr_head: - invalid['head'] = pr_head - diff.append(('Head', self.head, pr_head)) + # ensures all reviewers in the review path are on the PR in order: + # original reviewer, then last conflict reviewer, then current PR + reviewers = (self | self.root_id | self.source_id)\ + .mapped('reviewed_by.formatted_email') - if self.target.name != prdict['base']['ref']: - branch = self.env['runbot_merge.branch'].with_context(active_test=False).search([ - ('name', '=', prdict['base']['ref']), - ('project_id', '=', self.repository.project_id.id), - ]) - if not branch: - self.unlink() - raise exceptions.Unmergeable(self, "While staging, found this PR had been retargeted to an un-managed branch.") - invalid['target'] = branch.id - diff.append(('Target branch', self.target.name, branch.name)) - - if self.squash != commits == 1: - invalid['squash'] = commits == 1 - diff.append(('Single commit', self.squash, commits == 1)) - - msg = utils.make_message(prdict) - if self.message != msg: - invalid['message'] = msg - diff.append(('Message', self.message, msg)) - - if invalid: - self.write({**invalid, 'state': 'opened', 'head': pr_head}) - raise exceptions.Mismatch(invalid, diff) - - if self.reviewed_by and self.reviewed_by.name == self.reviewed_by.github_login: - # XXX: find other trigger(s) to sync github name? 
- gh_name = gh.user(self.reviewed_by.github_login)['name'] - if gh_name: - self.reviewed_by.name = gh_name - - # NOTE: lost merge v merge/copy distinction (head being - # a merge commit reused instead of being re-merged) - return method, getattr(self, '_stage_' + method.replace('-', '_'))( - gh, target, pr_commits, related_prs=related_prs) - - def _stage_squash(self, gh, target, commits, related_prs=()): - msg = self._build_merge_message(self, related_prs=related_prs) - authorship = {} - - authors = { - (c['commit']['author']['name'], c['commit']['author']['email']) - for c in commits - } - if len(authors) == 1: - name, email = authors.pop() - authorship['author'] = {'name': name, 'email': email} - else: - msg.headers.extend(sorted( - ('Co-Authored-By', "%s <%s>" % author) - for author in authors - )) - - committers = { - (c['commit']['committer']['name'], c['commit']['committer']['email']) - for c in commits - } - if len(committers) == 1: - name, email = committers.pop() - authorship['committer'] = {'name': name, 'email': email} - # should committers also be added to co-authors? - - original_head = gh.head(target) - merge_tree = gh.merge(self.head, target, 'temp merge')['tree']['sha'] - head = gh('post', 'git/commits', json={ - **authorship, - 'message': str(msg), - 'tree': merge_tree, - 'parents': [original_head], - }).json()['sha'] - gh.set_ref(target, head) - - commits_map = {c['sha']: head for c in commits} - commits_map[''] = head - self.commits_map = json.dumps(commits_map) - - return head - - def _stage_rebase_ff(self, gh, target, commits, related_prs=()): - # updates head commit with PR number (if necessary) then rebases - # on top of target - msg = self._build_merge_message(commits[-1]['commit']['message'], related_prs=related_prs) - commits[-1]['commit']['message'] = str(msg) - self._add_self_references(commits[:-1]) - head, mapping = gh.rebase(self.number, target, commits=commits) - self.commits_map = json.dumps({**mapping, '': head}) - return head - - def _stage_rebase_merge(self, gh, target, commits, related_prs=()): - self._add_self_references(commits) - h, mapping = gh.rebase(self.number, target, reset=True, commits=commits) - msg = self._build_merge_message(self, related_prs=related_prs) - merge_head = gh.merge(h, target, str(msg))['sha'] - self.commits_map = json.dumps({**mapping, '': merge_head}) - return merge_head - - def _stage_merge(self, gh, target, commits, related_prs=()): - pr_head = commits[-1] # oldest to newest - base_commit = None - head_parents = {p['sha'] for p in pr_head['parents']} - if len(head_parents) > 1: - # look for parent(s?) 
of pr_head not in PR, means it's - # from target (so we merged target in pr) - merge = head_parents - {c['sha'] for c in commits} - external_parents = len(merge) - if external_parents > 1: - raise exceptions.Unmergeable( - "The PR head can only have one parent from the base branch " - "(not part of the PR itself), found %d: %s" % ( - external_parents, - ', '.join(merge) - )) - if external_parents == 1: - [base_commit] = merge - - commits_map = {c['sha']: c['sha'] for c in commits} - if base_commit: - # replicate pr_head with base_commit replaced by - # the current head - original_head = gh.head(target) - merge_tree = gh.merge(pr_head['sha'], target, 'temp merge')['tree']['sha'] - new_parents = [original_head] + list(head_parents - {base_commit}) - msg = self._build_merge_message(pr_head['commit']['message'], related_prs=related_prs) - copy = gh('post', 'git/commits', json={ - 'message': str(msg), - 'tree': merge_tree, - 'author': pr_head['commit']['author'], - 'committer': pr_head['commit']['committer'], - 'parents': new_parents, - }).json() - gh.set_ref(target, copy['sha']) - # merge commit *and old PR head* map to the pr head replica - commits_map[''] = commits_map[pr_head['sha']] = copy['sha'] - self.commits_map = json.dumps(commits_map) - return copy['sha'] - else: - # otherwise do a regular merge - msg = self._build_merge_message(self) - merge_head = gh.merge(self.head, target, str(msg))['sha'] - # and the merge commit is the normal merge head - commits_map[''] = merge_head - self.commits_map = json.dumps(commits_map) - return merge_head + sobs = m.headers.getlist('signed-off-by') + m.headers.remove('signed-off-by') + m.headers.extend( + ('signed-off-by', signer) + for signer in sobs + if signer not in reviewers + ) + m.headers.extend( + ('signed-off-by', reviewer) + for reviewer in reversed(reviewers) + ) + return m def unstage(self, reason, *args): """ If the PR is staged, cancel the staging. If the PR is split and waiting, remove it from the split (possibly delete the split entirely) """ - split_batches = self.with_context(active_test=False).mapped('batch_ids').filtered('split_id') - if len(split_batches) > 1: - _logger.warning("Found a PR linked with more than one split batch: %s (%s)", self, split_batches) - for b in split_batches: - if len(b.split_id.batch_ids) == 1: - # only the batch of this PR -> delete split - b.split_id.unlink() - else: - # else remove this batch from the split - b.split_id = False + split = self.batch_id.split_id + if len(split.batch_ids) == 1: + # only the batch of this PR -> delete split + split.unlink() + else: + # else remove this batch from the split + self.batch_id.split_id = False self.staging_id.cancel('%s ' + reason, self.display_name, *args) def _try_closing(self, by): # ignore if the PR is already being updated in a separate transaction # (most likely being merged?) 
+ self.flush_recordset(['state', 'batch_id']) self.env.cr.execute(''' - SELECT id, state FROM runbot_merge_pull_requests - WHERE id = %s AND state != 'merged' + SELECT batch_id FROM runbot_merge_pull_requests + WHERE id = %s AND state != 'merged' AND state != 'closed' FOR UPDATE SKIP LOCKED; ''', [self.id]) - if not self.env.cr.fetchone(): + if not self.env.cr.rowcount: return False - self.env.cr.execute(''' - UPDATE runbot_merge_pull_requests - SET state = 'closed' - WHERE id = %s - ''', [self.id]) - self.env.cr.commit() - self.modified(['state']) self.unstage("closed by %s", by) + self.with_context(forwardport_detach_warn=False).write({ + 'closed': True, + 'reviewed_by': False, + 'parent_id': False, + 'detach_reason': f"Closed by {by}", + }) + self.search([('parent_id', '=', self.id)]).write({ + 'parent_id': False, + 'detach_reason': f"{by} closed parent PR {self.display_name}", + }) + return True + def _fp_conflict_feedback(self, previous_pr, conflicts): + (h, out, err, hh) = conflicts.get(previous_pr) or (None, None, None, None) + if h: + sout = serr = '' + if out.strip(): + sout = f"\nstdout:\n```\n{out}\n```\n" + if err.strip(): + serr = f"\nstderr:\n```\n{err}\n```\n" + + lines = '' + if len(hh) > 1: + lines = '\n' + ''.join( + '* %s%s\n' % (sha, ' <- on this commit' if sha == h else '') + for sha in hh + ) + template = 'runbot_merge.forwardport.failure' + format_args = { + 'pr': self, + 'commits': lines, + 'stdout': sout, + 'stderr': serr, + 'footer': FOOTER, + } + elif any(conflicts.values()): + template = 'runbot_merge.forwardport.linked' + format_args = { + 'pr': self, + 'siblings': ', '.join(p.display_name for p in (self.batch_id.prs - self)), + 'footer': FOOTER, + } + elif not self._find_next_target(): + ancestors = "".join( + f"* {p.display_name}\n" + for p in previous_pr._iter_ancestors() + if p.parent_id + if p.state not in ('closed', 'merged') + if p.target.active + ) + template = 'runbot_merge.forwardport.final' + format_args = { + 'pr': self, + 'containing': ' containing:' if ancestors else '.', + 'ancestors': ancestors, + 'footer': FOOTER, + } + else: + template = 'runbot_merge.forwardport.intermediate' + format_args = { + 'pr': self, + 'footer': FOOTER, + } + self.env.ref(template)._send( + repository=self.repository, + pull_request=self.number, + token_field='fp_github_token', + format_args=format_args, + ) + + def button_split(self): + if len(self.batch_id.prs) == 1: + raise UserError("Splitting a batch with a single PR is dumb") + + w = self.env['runbot_merge.pull_requests.split_off'].create({ + 'pr_id': self.id, + 'new_label': self.label, + }) + return { + 'type': 'ir.actions.act_window', + 'res_model': w._name, + 'res_id': w.id, + 'target': 'new', + 'views': [(False, 'form')], + } + + def _split_off(self, new_label): + # should not be usable to move a PR between batches (maybe later) + batch = self.env['runbot_merge.batch'] + if not re.search(r':patch-\d+$', new_label): + if batch.search([ + ('merge_date', '=', False), + ('prs.label', '=', new_label), + ]): + raise UserError("Can not split off to an existing batch") + + self.write({ + 'label': new_label, + 'batch_id': batch.create({}).id, + }) + +# ordering is a bit unintuitive because the lowest sequence (and name) +# is the last link of the fp chain, reasoning is a bit more natural the +# other way around (highest object is the last), especially with Python +# not really having lazy sorts in the stdlib +def branch_key(b: Branch, /, _key=itemgetter('sequence', 'name')): + return Reverse(_key(b)) + + +def 
pr_key(p: PullRequests, /):
+    return branch_key(p.target)
+
+
 # state changes on reviews
 RPLUS = {
     'opened': 'approved',
     'validated': 'ready',
 }
-RMINUS = {
-    'approved': 'opened',
-    'ready': 'validated',
-    'error': 'validated',
-}
 
 _TAGS = {
     False: set(),
@@ -1540,16 +1678,18 @@ class Tagging(models.Model):
     tags_remove = fields.Char(required=True, default='[]')
     tags_add = fields.Char(required=True, default='[]')
 
-    def create(self, values):
-        if values.pop('state_from', None):
-            values['tags_remove'] = ALL_TAGS
-        if 'state_to' in values:
-            values['tags_add'] = _TAGS[values.pop('state_to')]
-        if not isinstance(values.get('tags_remove', ''), str):
-            values['tags_remove'] = json.dumps(list(values['tags_remove']))
-        if not isinstance(values.get('tags_add', ''), str):
-            values['tags_add'] = json.dumps(list(values['tags_add']))
-        return super().create(values)
+    @api.model_create_multi
+    def create(self, vals_list):
+        for values in vals_list:
+            if values.pop('state_from', None):
+                values['tags_remove'] = ALL_TAGS
+            if 'state_to' in values:
+                values['tags_add'] = _TAGS[values.pop('state_to')]
+            if not isinstance(values.get('tags_remove', ''), str):
+                values['tags_remove'] = json.dumps(list(values['tags_remove']))
+            if not isinstance(values.get('tags_add', ''), str):
+                values['tags_add'] = json.dumps(list(values['tags_add']))
+        return super().create(vals_list)
 
     def _send(self):
         # noinspection SqlResolve
@@ -1586,7 +1726,7 @@ class Tagging(models.Model):
         try:
             gh.change_tags(pr, tags_remove, tags_add)
         except Exception:
-            _logger.exception(
+            _logger.info(
                 "Error while trying to change the tags of %s#%s from %s to %s",
                 repo.name, pr, remove, add,
             )
@@ -1599,10 +1739,10 @@ class Feedback(models.Model):
     """
     _name = _description = 'runbot_merge.pull_requests.feedback'
 
-    repository = fields.Many2one('runbot_merge.repository', required=True)
+    repository = fields.Many2one('runbot_merge.repository', required=True, index=True)
     # store the PR number (not id) as we may want to send feedback to PR
     # objects on non-handled branches
-    pull_request = fields.Integer(group_operator=None)
+    pull_request = fields.Integer(group_operator=None, index=True)
     message = fields.Char()
     close = fields.Boolean()
     token_field = fields.Selection(
@@ -1612,6 +1752,12 @@ class Feedback(models.Model):
         help="Token field (from repo's project) to use to post messages"
     )
 
+    @api.model_create_multi
+    def create(self, vals_list):
+        # any time a feedback is created, it can be sent
+        self.env.ref('runbot_merge.feedback_cron')._trigger()
+        return super().create(vals_list)
+
     def _send(self):
         ghs = {}
         to_remove = []
@@ -1654,25 +1800,112 @@ class Feedback(models.Model):
                     to_remove.append(f.id)
         self.browse(to_remove).unlink()
 
+class FeedbackTemplate(models.Model):
+    _name = 'runbot_merge.pull_requests.feedback.template'
+    _description = "str.format templates for feedback messages, no integration," \
+                   " but that's their purpose"
+    _inherit = ['mail.thread']
+
+    template = fields.Text(tracking=True)
+    help = fields.Text(readonly=True)
+
+    def _format(self, **args):
+        return self.template.format_map(args)
+
+    def _send(self, *, repository: Repository, pull_request: int, format_args: dict, token_field: Optional[str] = None) -> Optional[Feedback]:
+        try:
+            feedback = {
+                'repository': repository.id,
+                'pull_request': pull_request,
+                'message': self.template.format_map(format_args),
+            }
+            if token_field:
+                feedback['token_field'] = token_field
+            return self.env['runbot_merge.pull_requests.feedback'].create(feedback)
+        except Exception:
+            
_logger.exception("Failed to render template %s", self.get_external_id()) + raise + + +class StagingCommits(models.Model): + _name = 'runbot_merge.stagings.commits' + _description = "Mergeable commits for stagings, always the actually merged " \ + "commit, never a uniquifier" + _log_access = False + + staging_id = fields.Many2one('runbot_merge.stagings', required=True) + commit_id = fields.Many2one('runbot_merge.commit', index=True, required=True) + repository_id = fields.Many2one('runbot_merge.repository', required=True) + + def _auto_init(self): + super()._auto_init() + # the same commit can be both head and tip (?) + tools.create_unique_index( + self.env.cr, self._table + "_unique", + self._table, ['staging_id', 'commit_id'] + ) + # there should be one head per staging per repository, unless one is a + # real head and one is a uniquifier head + tools.create_unique_index( + self.env.cr, self._table + "_unique_per_repo", + self._table, ['staging_id', 'repository_id'], + ) + + +class StagingHeads(models.Model): + _name = 'runbot_merge.stagings.heads' + _description = "Staging heads, may be the staging's commit or may be a " \ + "uniquifier (discarded on success)" + _log_access = False + + staging_id = fields.Many2one('runbot_merge.stagings', required=True) + commit_id = fields.Many2one('runbot_merge.commit', index=True, required=True) + repository_id = fields.Many2one('runbot_merge.repository', required=True) + + def _auto_init(self): + super()._auto_init() + # the same commit can be both head and tip (?) + tools.create_unique_index( + self.env.cr, self._table + "_unique", + self._table, ['staging_id', 'commit_id'] + ) + # there should be one head per staging per repository, unless one is a + # real head and one is a uniquifier head + tools.create_unique_index( + self.env.cr, self._table + "_unique_per_repo", + self._table, ['staging_id', 'repository_id'], + ) + + class Commit(models.Model): """Represents a commit onto which statuses might be posted, independent of everything else as commits can be created by statuses only, by PR pushes, by branch updates, ... 
""" _name = _description = 'runbot_merge.commit' + _rec_name = 'sha' sha = fields.Char(required=True) statuses = fields.Char(help="json-encoded mapping of status contexts to states", default="{}") to_check = fields.Boolean(default=False) + head_ids = fields.Many2many('runbot_merge.stagings', relation='runbot_merge_stagings_heads', column2='staging_id', column1='commit_id') + commit_ids = fields.Many2many('runbot_merge.stagings', relation='runbot_merge_stagings_commits', column2='staging_id', column1='commit_id') + pull_requests = fields.One2many('runbot_merge.pull_requests', compute='_compute_prs') + + @api.model_create_multi def create(self, values): - values['to_check'] = True + for vals in values: + vals['to_check'] = True r = super(Commit, self).create(values) + self.env.ref("runbot_merge.process_updated_commits")._trigger() return r def write(self, values): values.setdefault('to_check', True) r = super(Commit, self).write(values) + if values['to_check']: + self.env.ref("runbot_merge.process_updated_commits")._trigger() return r def _notify(self): @@ -1680,24 +1913,27 @@ class Commit(models.Model): PRs = self.env['runbot_merge.pull_requests'] # chances are low that we'll have more than one commit for c in self.search([('to_check', '=', True)]): + sha = c.sha + pr = PRs.search([('head', '=', sha)]) + stagings = Stagings.search([ + ('head_ids.sha', '=', sha), + ('state', '=', 'pending'), + ('target.project_id.staging_statuses', '=', True), + ]) try: c.to_check = False - st = json.loads(c.statuses) - pr = PRs.search([('head', '=', c.sha)]) + c.flush_recordset(['to_check']) if pr: - pr._validate(st) + pr._validate(c.statuses) + pr._track_set_log_message(html_escape(f"statuses changed on {sha}")) - stagings = Stagings.search([('heads', 'ilike', c.sha)]).filtered( - lambda s, h=c.sha: any( - head == h - for repo, head in json.loads(s.heads).items() - if not repo.endswith('^') - ) - ) if stagings: - stagings._validate() + stagings._notify(c) + except psycopg2.errors.SerializationFailure: + _logger.info("Failed to apply commit %s (%s): serialization failure", c, sha) + self.env.cr.rollback() except Exception: - _logger.exception("Failed to apply commit %s (%s)", c, c.sha) + _logger.exception("Failed to apply commit %s (%s)", c, sha) self.env.cr.rollback() else: self.env.cr.commit() @@ -1719,131 +1955,180 @@ class Commit(models.Model): """) return res + def _compute_prs(self): + for c in self: + c.pull_requests = self.env['runbot_merge.pull_requests'].search([ + ('head', '=', c.sha), + ]) + + class Stagings(models.Model): _name = _description = 'runbot_merge.stagings' target = fields.Many2one('runbot_merge.branch', required=True, index=True) - batch_ids = fields.One2many( - 'runbot_merge.batch', 'staging_id', + staging_batch_ids = fields.One2many('runbot_merge.staging.batch', 'runbot_merge_stagings_id') + batch_ids = fields.Many2many( + 'runbot_merge.batch', context={'active_test': False}, + compute="_compute_batch_ids", + search="_search_batch_ids", ) + pr_ids = fields.One2many('runbot_merge.pull_requests', compute='_compute_prs') state = fields.Selection([ ('success', 'Success'), ('failure', 'Failure'), ('pending', 'Pending'), ('cancelled', "Cancelled"), ('ff_failed', "Fast forward failed") - ], default='pending') + ], default='pending', index=True, store=True, compute='_compute_state') active = fields.Boolean(default=True) staged_at = fields.Datetime(default=fields.Datetime.now, index=True) + staging_end = fields.Datetime(store=True, compute='_compute_state') + staging_duration = 
fields.Float(compute='_compute_duration') timeout_limit = fields.Datetime(store=True, compute='_compute_timeout_limit') reason = fields.Text("Reason for final state (if any)") - # seems simpler than adding yet another indirection through a model - heads = fields.Char(required=True, help="JSON-encoded map of heads, one per repo in the project") - head_ids = fields.Many2many('runbot_merge.commit', compute='_compute_statuses') + head_ids = fields.Many2many('runbot_merge.commit', relation='runbot_merge_stagings_heads', column1='staging_id', column2='commit_id') + heads = fields.One2many('runbot_merge.stagings.heads', 'staging_id') + commit_ids = fields.Many2many('runbot_merge.commit', relation='runbot_merge_stagings_commits', column1='staging_id', column2='commit_id') + commits = fields.One2many('runbot_merge.stagings.commits', 'staging_id') statuses = fields.Binary(compute='_compute_statuses') - statuses_cache = fields.Text() + statuses_cache = fields.Text(default='{}', required=True) - def write(self, vals): - # don't allow updating the statuses_cache - vals.pop('statuses_cache', None) + @api.depends('staged_at', 'staging_end') + def _compute_duration(self): + for s in self: + s.staging_duration = ((s.staging_end or fields.Datetime.now()) - s.staged_at).total_seconds() - if 'state' not in vals: - return super().write(vals) - - previously_pending = self.filtered(lambda s: s.state == 'pending') - super(Stagings, self).write(vals) - for staging in previously_pending: - if staging.state != 'pending': - super(Stagings, staging).write({ - 'statuses_cache': json.dumps(staging.statuses) - }) - - return True - - - def name_get(self): - return [ - (staging.id, "%d (%s, %s%s)" % ( + @api.depends('target.name', 'state', 'reason') + def _compute_display_name(self): + for staging in self: + staging.display_name = "%d (%s, %s%s)" % ( staging.id, staging.target.name, staging.state, (', ' + staging.reason) if staging.reason else '', - )) - for staging in self - ] + ) - @api.depends('heads') + @api.depends('staging_batch_ids.runbot_merge_batch_id') + def _compute_batch_ids(self): + for staging in self: + staging.batch_ids = staging.staging_batch_ids.runbot_merge_batch_id + + def _search_batch_ids(self, operator, value): + return [('staging_batch_ids.runbot_merge_batch_id', operator, value)] + + @api.depends('heads', 'statuses_cache') def _compute_statuses(self): """ Fetches statuses associated with the various heads, returned as (repo, context, state, url) """ - Commits = self.env['runbot_merge.commit'] - for st in self: - heads = { - head: repo for repo, head in json.loads(st.heads).items() - if not repo.endswith('^') - } - commits = st.head_ids = Commits.search([('sha', 'in', list(heads.keys()))]) - if st.statuses_cache: - st.statuses = json.loads(st.statuses_cache) - continue + heads = {h.commit_id: h.repository_id for h in self.mapped('heads')} + all_heads = self.mapped('head_ids') + for st in self: + statuses = json.loads(st.statuses_cache) + + commits = st.head_ids.with_prefetch(all_heads._prefetch_ids) st.statuses = [ ( - heads[commit.sha], + heads[commit].name, context, status.get('state') or 'pending', status.get('target_url') or '' ) for commit in commits - for context, st in json.loads(commit.statuses).items() - for status in [to_status(st)] + for context, status in statuses.get(commit.sha, {}).items() ] + def write(self, vals): + if timeout := vals.get('timeout_limit'): + self.env.ref("runbot_merge.merge_cron")\ + ._trigger(fields.Datetime.to_datetime(timeout)) + + if vals.get('active') is False: 
+ self.env.ref("runbot_merge.staging_cron")._trigger() + + return super().write(vals) + # only depend on staged_at as it should not get modified, but we might # update the CI timeout after the staging have been created and we # *do not* want to update the staging timeouts in that case @api.depends('staged_at') def _compute_timeout_limit(self): + timeouts = set() for st in self: - st.timeout_limit = fields.Datetime.to_string( - fields.Datetime.from_string(st.staged_at) - + datetime.timedelta(minutes=st.target.project_id.ci_timeout) - ) + t = st.timeout_limit = st.staged_at + datetime.timedelta(minutes=st.target.project_id.ci_timeout) + timeouts.add(t) + if timeouts: + # we might have very different limits for each staging so need to schedule them all + self.env.ref("runbot_merge.merge_cron")._trigger_list(timeouts) - def _validate(self): - Commits = self.env['runbot_merge.commit'] + @api.depends('batch_ids.prs') + def _compute_prs(self): + for staging in self: + staging.pr_ids = staging.batch_ids.prs + + def _notify(self, c: Commit) -> None: + self.env.cr.execute(""" + UPDATE runbot_merge_stagings + SET statuses_cache = CASE + WHEN statuses_cache::jsonb->%(sha)s IS NULL + THEN jsonb_insert(statuses_cache::jsonb, ARRAY[%(sha)s], %(statuses)s::jsonb) + ELSE statuses_cache::jsonb || jsonb_build_object(%(sha)s, %(statuses)s::jsonb) + END::text + WHERE id = any(%(ids)s) + """, {'sha': c.sha, 'statuses': c.statuses, 'ids': self.ids}) + self.modified(['statuses_cache']) + + def post_status(self, sha, context, status, *, target_url=None, description=None): + if not self.env.user.has_group('runbot_merge.status'): + raise AccessError("You are not allowed to post a status.") + + for s in self: + if not s.target.project_id.staging_rpc: + continue + + if not any(c.commit_id.sha == sha for c in s.commits): + raise ValueError(f"Staging {s.id} does not have the commit {sha}") + + st = json.loads(s.statuses_cache) + st.setdefault(sha, {})[context] = { + 'state': status, + 'target_url': target_url, + 'description': description, + } + s.statuses_cache = json.dumps(st) + + return True + + @api.depends( + "statuses_cache", + "target", + "heads.commit_id.sha", + "heads.repository_id.status_ids.branch_filter", + "heads.repository_id.status_ids.context", + ) + def _compute_state(self): for s in self: if s.state != 'pending': continue - repos = { - repo.name: repo - for repo in self.env['runbot_merge.repository'].search([]) - .having_branch(s.target) - } # maps commits to the statuses they need required_statuses = [ - (head, repos[repo].status_ids._for_staging(s).mapped('context')) - for repo, head in json.loads(s.heads).items() - if not repo.endswith('^') + (h.commit_id.sha, h.repository_id.status_ids._for_staging(s).mapped('context')) + for h in s.heads ] - # maps commits to their statuses - cmap = { - c.sha: json.loads(c.statuses) - for c in Commits.search([('sha', 'in', [h for h, _ in required_statuses])]) - } + cmap = json.loads(s.statuses_cache) update_timeout_limit = False st = 'success' for head, reqs in required_statuses: statuses = cmap.get(head) or {} - for v in map(lambda n: state_(statuses, n), reqs): + for v in map(lambda n: statuses.get(n, {}).get('state'), reqs): if st == 'failure' or v in ('error', 'failure'): st = 'failure' elif v is None: @@ -1854,11 +2139,14 @@ class Stagings(models.Model): else: assert v == 'success' - vals = {'state': st} + s.state = st + if s.state != 'pending': + self.env.ref("runbot_merge.merge_cron")._trigger() + s.staging_end = fields.Datetime.now() if 
update_timeout_limit: - vals['timeout_limit'] = fields.Datetime.to_string(datetime.datetime.now() + datetime.timedelta(minutes=s.target.project_id.ci_timeout)) - _logger.debug("%s got pending status, bumping timeout to %s (%s)", self, vals['timeout_limit'], cmap) - s.write(vals) + s.timeout_limit = datetime.datetime.now() + datetime.timedelta(minutes=s.target.project_id.ci_timeout) + self.env.ref("runbot_merge.merge_cron")._trigger(s.timeout_limit) + _logger.debug("%s got pending status, bumping timeout to %s (%s)", self, s.timeout_limit, cmap) def action_cancel(self): w = self.env['runbot_merge.stagings.cancel'].create({ @@ -1876,33 +2164,34 @@ class Stagings(models.Model): def cancel(self, reason, *args): self = self.filtered('active') if not self: - return + return False _logger.info("Cancelling staging %s: " + reason, self, *args) - self.mapped('batch_ids').write({'active': False}) self.write({ 'active': False, 'state': 'cancelled', 'reason': reason % args, }) + return True def fail(self, message, prs=None): _logger.info("Staging %s failed: %s", self, message) prs = prs or self.batch_ids.prs - prs.write({'state': 'error'}) + prs._track_set_log_message(f'staging {self.id} failed: {message}') + prs.error = True for pr in prs: - self.env['runbot_merge.pull_requests.feedback'].create({ - 'repository': pr.repository.id, - 'pull_request': pr.number, - 'message': "%sstaging failed: %s" % (pr.ping(), message), - }) + self.env.ref('runbot_merge.pr.staging.fail')._send( + repository=pr.repository, + pull_request=pr.number, + format_args={'pr': pr, 'message': message}, + ) - self.batch_ids.write({'active': False}) self.write({ 'active': False, 'state': 'failure', 'reason': message, }) + return True def try_splitting(self): batches = len(self.batch_ids) @@ -1912,15 +2201,14 @@ class Stagings(models.Model): # NB: batches remain attached to their original staging sh = self.env['runbot_merge.split'].create({ 'target': self.target.id, - 'batch_ids': [(4, batch.id, 0) for batch in h], + 'batch_ids': [Command.link(batch.id) for batch in h], }) st = self.env['runbot_merge.split'].create({ 'target': self.target.id, - 'batch_ids': [(4, batch.id, 0) for batch in t], + 'batch_ids': [Command.link(batch.id) for batch in t], }) _logger.info("Split %s to %s (%s) and %s (%s)", self, h, sh, t, st) - self.batch_ids.write({'active': False}) self.write({ 'active': False, 'state': 'failure', @@ -1928,47 +2216,36 @@ class Stagings(models.Model): }) return True - # single batch => the staging is an unredeemable failure + # single batch => the staging is an irredeemable failure if self.state != 'failure': # timed out, just mark all PRs (wheee) self.fail('timed out (>{} minutes)'.format(self.target.project_id.ci_timeout)) return False + staging_statuses = json.loads(self.statuses_cache) # try inferring which PR failed and only mark that one - for repo, head in json.loads(self.heads).items(): - if repo.endswith('^'): - continue + for head in self.heads: + required_statuses = set(head.repository_id.status_ids._for_staging(self).mapped('context')) - required_statuses = set( - self.env['runbot_merge.repository'] - .search([('name', '=', repo)]) - .status_ids - ._for_staging(self) - .mapped('context')) - - commit = self.env['runbot_merge.commit'].search([('sha', '=', head)]) - statuses = json.loads(commit.statuses or '{}') + statuses = staging_statuses.get(head.commit_id.sha, {}) reason = next(( ctx for ctx, result in statuses.items() if ctx in required_statuses - if to_status(result).get('state') in ('error', 'failure') + if 
result.get('state') in ('error', 'failure') ), None) if not reason: continue - pr = next(( - pr for pr in self.batch_ids.prs - if pr.repository.name == repo - ), None) + pr = next((pr for pr in self.batch_ids.prs if pr.repository == head.repository_id), None) - status = to_status(statuses[reason]) + status = statuses[reason] viewmore = '' if status.get('target_url'): viewmore = ' (view more at %(target_url)s)' % status if pr: self.fail("%s%s" % (reason, viewmore), pr) else: - self.fail('%s on %s%s' % (reason, head, viewmore)) + self.fail('%s on %s%s' % (reason, head.commit_id.sha, viewmore)) return False # the staging failed but we don't have a specific culprit, fail @@ -1995,19 +2272,21 @@ class Stagings(models.Model): project = self.target.project_id if self.state == 'success': gh = {repo.name: repo.github() for repo in project.repo_ids.having_branch(self.target)} - staging_heads = json.loads(self.heads) self.env.cr.execute(''' SELECT 1 FROM runbot_merge_pull_requests WHERE id in %s FOR UPDATE ''', [tuple(self.mapped('batch_ids.prs.id'))]) try: - self._safety_dance(gh, staging_heads) + with sentry_sdk.start_span(description="merge staging") as span: + span.set_tag("staging", self.id) + span.set_tag("branch", self.target.name) + self._safety_dance(gh, self.commits) except exceptions.FastForwardError as e: logger.warning( - "Could not fast-forward successful staging on %s:%s", + "Could not fast-forward successful staging on %s:%s: %s", e.args[0], self.target.name, - exc_info=True + e, ) self.write({ 'state': 'ff_failed', @@ -2015,11 +2294,12 @@ class Stagings(models.Model): }) else: prs = self.mapped('batch_ids.prs') + prs._track_set_log_message(f'staging {self.id} succeeded') logger.info( "%s FF successful, marking %s as merged", self, prs ) - prs.write({'state': 'merged'}) + self.batch_ids.merge_date = fields.Datetime.now() pseudobranch = None if self.target == project.branch_ids[:1]: @@ -2041,7 +2321,6 @@ class Stagings(models.Model): 'tags_add': json.dumps([pseudobranch]), }) finally: - self.batch_ids.write({'active': False}) self.write({'active': False}) elif self.state == 'failure' or self.is_timed_out(): self.try_splitting() @@ -2049,7 +2328,7 @@ class Stagings(models.Model): def is_timed_out(self): return fields.Datetime.from_string(self.timeout_limit) < datetime.datetime.now() - def _safety_dance(self, gh, staging_heads): + def _safety_dance(self, gh, staging_commits: StagingCommits): """ Reverting updates doesn't work if the branches are protected (because a revert is basically a force push). So we can update REPO_A, then fail to update REPO_B for some reason, and we're hosed. @@ -2066,51 +2345,69 @@ class Stagings(models.Model): bad. In that case, wait a bit and retry for now. A more complex strategy (including disabling the branch entirely until somebody has looked at and fixed the issue) might be necessary. - - :returns: the last repo it tried to update (probably the one on which - it failed, if it failed) """ - # FIXME: would make sense for FFE to be richer, and contain the repo name - repo_name = None tmp_target = 'tmp.' 
+ self.target.name
 
         # first force-push the current targets to all tmps
-        for repo_name in staging_heads.keys():
-            if repo_name.endswith('^'):
-                continue
+        for repo_name in staging_commits.mapped('repository_id.name'):
             g = gh[repo_name]
             g.set_ref(tmp_target, g.head(self.target.name))
 
-        # then attempt to FF the tmp to the staging
-        for repo_name, head in staging_heads.items():
-            if repo_name.endswith('^'):
-                continue
-            gh[repo_name].fast_forward(tmp_target, staging_heads.get(repo_name + '^') or head)
+        # then attempt to FF the tmp to the staging commits
+        for c in staging_commits:
+            gh[c.repository_id.name].fast_forward(tmp_target, c.commit_id.sha)
 
         # there is still a race condition here, but it's way
         # lower than "the entire staging duration"...
-        first = True
-        for repo_name, head in staging_heads.items():
-            if repo_name.endswith('^'):
-                continue
-
+        for i, c in enumerate(staging_commits):
             for pause in [0.1, 0.3, 0.5, 0.9, 0]: # last one must be 0/falsy or we lose the exception
                 try:
-                    # if the staging has a $repo^ head, merge that,
-                    # otherwise merge the regular (CI'd) head
-                    gh[repo_name].fast_forward(
+                    gh[c.repository_id.name].fast_forward(
                         self.target.name,
-                        staging_heads.get(repo_name + '^') or head
+                        c.commit_id.sha
                     )
                 except exceptions.FastForwardError:
-                    # The GH API regularly fails us. If the failure does not
-                    # occur on the first repository, retry a few times with a
-                    # little pause.
-                    if not first and pause:
+                    if i and pause:
                         time.sleep(pause)
                         continue
                     raise
                 else:
                     break
-            first = False
-        return repo_name
+
+    @api.returns('runbot_merge.stagings')
+    def for_heads(self, *heads):
+        """Returns the staging(s) with all the specified heads. Heads should
+        be unique git oids.
+        """
+        if not heads:
+            return self.browse(())
+
+        joins = ''.join(
+            f'\nJOIN runbot_merge_stagings_heads h{i} ON h{i}.staging_id = s.id'
+            f'\nJOIN runbot_merge_commit c{i} ON c{i}.id = h{i}.commit_id AND c{i}.sha = %s\n'
+            for i in range(len(heads))
+        )
+        self.env.cr.execute(f"SELECT s.id FROM runbot_merge_stagings s {joins}", heads)
+        stagings = self.browse(id for [id] in self.env.cr.fetchall())
+        stagings.check_access_rights('read')
+        stagings.check_access_rule('read')
+        return stagings
+
+    @api.returns('runbot_merge.stagings')
+    def for_commits(self, *heads):
+        """Returns the staging(s) with all the specified commits (heads which
+        have actually been merged). Commits should be unique git oids.
+        """
+        if not heads:
+            return self.browse(())
+
+        joins = ''.join(
+            f'\nJOIN runbot_merge_stagings_commits h{i} ON h{i}.staging_id = s.id'
+            f'\nJOIN runbot_merge_commit c{i} ON c{i}.id = h{i}.commit_id AND c{i}.sha = %s\n'
+            for i in range(len(heads))
+        )
+        self.env.cr.execute(f"SELECT s.id FROM runbot_merge_stagings s {joins}", heads)
+        stagings = self.browse(id for [id] in self.env.cr.fetchall())
+        stagings.check_access_rights('read')
+        stagings.check_access_rule('read')
+        return stagings
 
 class Split(models.Model):
     _name = _description = 'runbot_merge.split'
@@ -2118,128 +2415,6 @@ class Split(models.Model):
     target = fields.Many2one('runbot_merge.branch', required=True)
     batch_ids = fields.One2many('runbot_merge.batch', 'split_id', context={'active_test': False})
 
-class Batch(models.Model):
-    """ A batch is a "horizontal" grouping of *codependent* PRs: PRs with
-    the same label & target but for different repositories. These are
-    assumed to be part of the same "change" smeared over multiple
-    repositories e.g. change an API in repo1, this breaks use of that API
-    in repo2 which now needs to be updated. 
- """ - _name = _description = 'runbot_merge.batch' - - target = fields.Many2one('runbot_merge.branch', required=True, index=True) - staging_id = fields.Many2one('runbot_merge.stagings', index=True) - split_id = fields.Many2one('runbot_merge.split', index=True) - - prs = fields.Many2many('runbot_merge.pull_requests') - - active = fields.Boolean(default=True) - - @api.constrains('target', 'prs') - def _check_prs(self): - for batch in self: - repos = self.env['runbot_merge.repository'] - for pr in batch.prs: - if pr.target != batch.target: - raise ValidationError("A batch and its PRs must have the same branch, got %s and %s" % (batch.target, pr.target)) - if pr.repository in repos: - raise ValidationError("All prs of a batch must have different target repositories, got a duplicate %s on %s" % (pr.repository, pr)) - repos |= pr.repository - - def stage(self, meta, prs): - """ - Updates meta[*][head] on success - - :return: () or Batch object (if all prs successfully staged) - """ - new_heads = {} - pr_fields = self.env['runbot_merge.pull_requests']._fields - for pr in prs: - gh = meta[pr.repository]['gh'] - - _logger.info( - "Staging pr %s for target %s; method=%s", - pr.display_name, pr.target.name, - pr.merge_method or (pr.squash and 'single') or None - ) - - target = 'tmp.{}'.format(pr.target.name) - original_head = gh.head(target) - try: - try: - method, new_heads[pr] = pr._stage(gh, target, related_prs=(prs - pr)) - _logger.info( - "Staged pr %s to %s by %s: %s -> %s", - pr.display_name, pr.target.name, method, - original_head, new_heads[pr] - ) - except Exception: - # reset the head which failed, as rebase() may have partially - # updated it (despite later steps failing) - gh.set_ref(target, original_head) - # then reset every previous update - for to_revert in new_heads.keys(): - it = meta[to_revert.repository] - it['gh'].set_ref('tmp.{}'.format(to_revert.target.name), it['head']) - raise - except github.MergeError: - raise exceptions.MergeError(pr) - except exceptions.Mismatch as e: - def format_items(items): - """ Bit of a pain in the ass because difflib really wants - all lines to be newline-terminated, but not all values are - actual lines, and also needs to split multiline values. - """ - for name, value in items: - yield name + ':\n' - if not value.endswith('\n'): - value += '\n' - yield from value.splitlines(keepends=True) - yield '\n' - - old = list(format_items((n, str(v)) for n, v, _ in e.args[1])) - new = list(format_items((n, str(v)) for n, _, v in e.args[1])) - diff = ''.join(Differ().compare(old, new)) - _logger.warning( - "data mismatch on %s:\n%s", - pr.display_name, diff - ) - self.env['runbot_merge.pull_requests.feedback'].create({ - 'repository': pr.repository.id, - 'pull_request': pr.number, - 'message': """\ -{ping}we apparently missed updates to this PR and tried to stage it in a state \ -which might not have been approved. - -The properties {mismatch} were not correctly synchronized and have been updated. - -<details><summary>differences</summary> - -```diff -{diff}``` -</details> - -Note that we are unable to check the properties {unchecked}. - -Please check and re-approve. 
-""".format( - ping=pr.ping(), - mismatch=', '.join(pr_fields[f].string for f in e.args[0]), - diff=diff, - unchecked=', '.join(pr_fields[f].string for f in UNCHECKABLE), -) - }) - return self.env['runbot_merge.batch'] - - # update meta to new heads - for pr, head in new_heads.items(): - meta[pr.repository]['head'] = head - return self.create({ - 'target': prs[0].target.id, - 'prs': [(4, pr.id, 0) for pr in prs], - }) - -UNCHECKABLE = ['merge_method', 'overrides', 'draft'] class FetchJob(models.Model): _name = _description = 'runbot_merge.fetch_job' @@ -2247,6 +2422,12 @@ class FetchJob(models.Model): active = fields.Boolean(default=True) repository = fields.Many2one('runbot_merge.repository', required=True) number = fields.Integer(required=True, group_operator=None) + closing = fields.Boolean(default=False) + + @api.model_create_multi + def create(self, vals_list): + self.env.ref('runbot_merge.fetch_prs_cron')._trigger() + return super().create(vals_list) def _check(self, commit=False): """ @@ -2259,7 +2440,7 @@ class FetchJob(models.Model): self.env.cr.execute("SAVEPOINT runbot_merge_before_fetch") try: - f.repository._load_pr(f.number) + f.repository._load_pr(f.number, closing=f.closing) except Exception: self.env.cr.execute("ROLLBACK TO SAVEPOINT runbot_merge_before_fetch") _logger.exception("Failed to load pr %s, skipping it", f.number) @@ -2270,159 +2451,5 @@ class FetchJob(models.Model): if commit: self.env.cr.commit() -# The commit (and PR) statuses was originally a map of ``{context:state}`` -# however it turns out to clarify error messages it'd be useful to have -# a bit more information e.g. a link to the CI's build info on failure and -# all that. So the db-stored statuses are now becoming a map of -# ``{ context: {state, target_url, description } }``. 
The issue here is -# there's already statuses stored in the db so we need to handle both -# formats, hence these utility functions) -def state_(statuses, name): - """ Fetches the status state """ - name = name.strip() - v = statuses.get(name) - if isinstance(v, dict): - return v.get('state') - return v -def to_status(v): - """ Converts old-style status values (just a state string) to new-style - (``{state, target_url, description}``) - :type v: str | dict - :rtype: dict - """ - if isinstance(v, dict): - return v - return {'state': v, 'target_url': None, 'description': None} - -refline = re.compile(rb'([\da-f]{40}) ([^\0\n]+)(\0.*)?\n?$') -ZERO_REF = b'0'*40 -def parse_refs_smart(read): - """ yields pkt-line data (bytes), or None for flush lines """ - def read_line(): - length = int(read(4), 16) - if length == 0: - return None - return read(length - 4) - - header = read_line() - assert header.rstrip() == b'# service=git-upload-pack', header - assert read_line() is None, "failed to find first flush line" - # read lines until second delimiter - for line in iter(read_line, None): - if line.startswith(ZERO_REF): - break # empty list (no refs) - m = refline.match(line) - yield m[1].decode(), m[2].decode() - -BREAK = re.compile(r''' - ^ - [ ]{0,3} # 0-3 spaces of indentation - # followed by a sequence of three or more matching -, _, or * characters, - # each followed optionally by any number of spaces or tabs - # so needs to start with a _, - or *, then have at least 2 more such - # interspersed with any number of spaces or tabs - ([*_-]) - ([ \t]*\1){2,} - [ \t]* - $ -''', flags=re.VERBOSE) -SETEX_UNDERLINE = re.compile(r''' - ^ - [ ]{0,3} # no more than 3 spaces indentation - [-=]+ # a sequence of = characters or a sequence of - characters - [ ]* # any number of trailing spaces - $ - # we don't care about "a line containing a single -" because we want to - # disambiguate SETEX headings from thematic breaks, and thematic breaks have - # 3+ -. Doesn't look like GH interprets `- - -` as a line so yay... -''', flags=re.VERBOSE) -HEADER = re.compile('^([A-Za-z-]+): (.*)$') -class Message: - @classmethod - def from_message(cls, msg): - in_headers = True - maybe_setex = None - # creating from PR message -> remove content following break - msg, handle_break = (msg, False) if isinstance(msg, str) else (msg.message, True) - headers = [] - body = [] - # don't process the title (first line) of the commit message - msg = msg.splitlines() - for line in reversed(msg[1:]): - if maybe_setex: - # NOTE: actually slightly more complicated: it's a SETEX heading - # only if preceding line(s) can be interpreted as a - # paragraph so e.g. a title followed by a line of dashes - # would indeed be a break, but this should be good enough - # for now, if we need more we'll need a full-blown - # markdown parser probably - if line: # actually a SETEX title -> add underline to body then process current - body.append(maybe_setex) - else: # actually break, remove body then process current - body = [] - maybe_setex = None - - if not line: - if not in_headers and body and body[-1]: - body.append(line) - continue - - if handle_break and BREAK.match(line): - if SETEX_UNDERLINE.match(line): - maybe_setex = line - else: - body = [] - continue - - h = HEADER.match(line) - if h: - # c-a-b = special case from an existing test, not sure if actually useful? 
- if in_headers or h.group(1).lower() == 'co-authored-by': - headers.append(h.groups()) - continue - - body.append(line) - in_headers = False - - # if there are non-title body lines, add a separation after the title - if body and body[-1]: - body.append('') - body.append(msg[0]) - return cls('\n'.join(reversed(body)), Headers(reversed(headers))) - - def __init__(self, body, headers=None): - self.body = body - self.headers = headers or Headers() - - def __setattr__(self, name, value): - # make sure stored body is always stripped - if name == 'body': - value = value and value.strip() - super().__setattr__(name, value) - - def __str__(self): - if not self.headers: - return self.body + '\n' - - with io.StringIO(self.body) as msg: - msg.write(self.body) - msg.write('\n\n') - # https://git.wiki.kernel.org/index.php/CommitMessageConventions - # seems to mostly use capitalised names (rather than title-cased) - keys = list(OrderedSet(k.capitalize() for k in self.headers.keys())) - # c-a-b must be at the very end otherwise github doesn't see it - keys.sort(key=lambda k: k == 'Co-authored-by') - for k in keys: - for v in self.headers.getlist(k): - msg.write(k) - msg.write(': ') - msg.write(v) - msg.write('\n') - - return msg.getvalue() - - def sub(self, pattern, repl, *, flags): - """ Performs in-place replacements on the body - """ - self.body = re.sub(pattern, repl, self.body, flags=flags) +from .stagings_create import is_mentioned, Message diff --git a/runbot_merge/models/res_partner.py b/runbot_merge/models/res_partner.py index d627b2f4..5e805dda 100644 --- a/runbot_merge/models/res_partner.py +++ b/runbot_merge/models/res_partner.py @@ -1,7 +1,10 @@ import random from email.utils import parseaddr -from odoo import fields, models, tools, api +from markupsafe import Markup, escape + +import odoo.tools +from odoo import fields, models, tools, api, Command from .. import github @@ -11,7 +14,8 @@ class CIText(fields.Char): column_cast_from = ('varchar', 'text') class Partner(models.Model): - _inherit = 'res.partner' + _name = 'res.partner' + _inherit = ['res.partner', 'mail.thread'] email = fields.Char(index=True) github_login = CIText() @@ -19,6 +23,7 @@ class Partner(models.Model): formatted_email = fields.Char(string="commit email", compute='_rfc5322_formatted') review_rights = fields.One2many('res.partner.review', 'partner_id') override_rights = fields.Many2many('res.partner.override') + override_sensitive = fields.Boolean(compute="_compute_sensitive_overrides") def _auto_init(self): res = super(Partner, self)._auto_init() @@ -45,6 +50,71 @@ class Partner(models.Model): p.email = gh.user(p.github_login)['email'] or False return False + @api.depends("override_rights.context") + def _compute_sensitive_overrides(self): + for p in self: + p.override_sensitive = any(o.context == 'ci/security' for o in p.override_rights) + + def write(self, vals): + created = [] + updated = {} + deleted = set() + for cmd, id, values in vals.get('review_rights', []): + if cmd == Command.DELETE: + deleted.add(id) + elif cmd == Command.CREATE: + # 'repository_id': 3, 'review': True, 'self_review': False + created.append(values) + elif cmd == Command.UPDATE: + updated[id] = values + # could also be LINK for records which are not touched but we don't care + + new_rights = None + if r := vals.get('override_rights'): + # only handle reset (for now?) even though technically e.g. 0 works + # the web client doesn't seem to use it (?) 
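The `6` tested just below is Odoo's x2many `SET` command. As a minimal standalone sketch of the command triples this write() receives (mirroring the documented `odoo.fields.Command` constants; the enum here is illustrative rather than imported from Odoo):

from enum import IntEnum

class Command(IntEnum):
    CREATE = 0  # (0, 0, values): create a record from `values` and link it
    UPDATE = 1  # (1, id, values): update the linked record `id` with `values`
    DELETE = 2  # (2, id, 0): unlink and delete record `id`
    UNLINK = 3  # (3, id, 0): unlink record `id` but keep it
    LINK = 4    # (4, id, 0): link existing record `id`
    CLEAR = 5   # (5, 0, 0): unlink all records
    SET = 6     # (6, 0, ids): replace all links with `ids`

def set_ids(commands):
    """Returns the ids of the first SET command, the same decoding as
    the ``r[0][0] == 6`` check below."""
    for c in commands:
        if c[0] == Command.SET:
            return c[2]
    return None

assert set_ids([(6, 0, [1, 2])]) == [1, 2]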
+ if r[0][0] == 6: + new_rights = self.env['res.partner.override'].browse(r[0][2]) + + Repo = self.env['runbot_merge.repository'].browse + for p in self: + msgs = [] + if ds := p.review_rights.filtered(lambda r: r.id in deleted): + msgs.append("removed review rights on {}\n".format( + ', '.join(ds.mapped('repository_id.name')) + )) + if us := p.review_rights.filtered(lambda r: r.id in updated): + msgs.extend( + "updated review rights on {}: {}\n".format( + u.repository_id.name, + ', '.join( + f'allowed {f}' if v else f'forbid {f}' + for f in ['review', 'self_review'] + if (v := updated[u.id].get(f)) is not None + ) + ) + for u in us + ) + msgs.extend( + 'added review rights on {}: {}\n'.format( + Repo(c['repository_id']).name, + ', '.join(filter(c.get, ['review', 'self_review'])), + ) + for c in created + ) + if new_rights is not None: + for r in p.override_rights - new_rights: + msgs.append(f"removed override rights for {r.context!r} on {r.repository_id.name}") + for r in new_rights - p.override_rights: + msgs.append(f"added override rights for {r.context!r} on {r.repository_id.name}") + if msgs: + p._message_log(body=Markup('<ul>{}</ul>').format(Markup().join( + map(Markup('<li>{}</li>').format, reversed(msgs)) + ))) + + return super().write(vals) + + class PartnerMerge(models.TransientModel): _inherit = 'base.partner.merge.automatic.wizard' @@ -75,14 +145,13 @@ class ReviewRights(models.Model): tools.create_unique_index(self._cr, 'runbot_merge_review_m2m', self._table, ['partner_id', 'repository_id']) return res - def name_get(self): - return [ - (r.id, '%s: %s' % (r.repository_id.name, ', '.join(filter(None, [ + @api.depends('repository_id.name', 'review', 'self_review') + def _compute_display_name(self): + for r in self: + r.display_name = '%s: %s' % (r.repository_id.name, ', '.join(filter(None, [ r.review and "reviewer", r.self_review and "self-reviewer" - ])))) - for r in self - ] + ]))) @api.model def name_search(self, name='', args=None, operator='ilike', limit=100): @@ -103,6 +172,42 @@ class OverrideRights(models.Model): ['context', 'coalesce(repository_id, 0)'] ) + @api.model_create_multi + def create(self, vals_list): + for partner, contexts in odoo.tools.groupby(( + (partner_id, vals['context'], vals['repository_id']) + for vals in vals_list + # partner_ids is of the form [Command.set(ids) + for partner_id in vals.get('partner_ids', [(None, None, [])])[0][2] + ), lambda p: p[0]): + partner = self.env['res.partner'].browse(partner) + for _, context, repository in contexts: + repository = self.env['runbot_merge.repository'].browse(repository) + partner._message_log(body=f"added override rights for {context!r} on {repository.name}") + + return super().create(vals_list) + + def write(self, vals): + new = None + if pids := vals.get('partner_ids'): + new = self.env['res.partner'].browse(pids[0][2]) + if new is not None: + for o in self: + added = new - o.partner_ids + removed = o.partner_ids - new + for p in added: + p._message_log(body=f"added override rights for {o.context!r} on {o.repository_id.name}") + for r in removed: + r._message_log(body=f"removed override rights for {o.context!r} on {o.repository_id.name}") + + return super().write(vals) + + def unlink(self): + for o in self: + for p in o.partner_ids: + p._message_log(body=f"removed override rights for {o.context!r} on {o.repository_id.name}") + return super().unlink() + @api.model def name_search(self, name='', args=None, operator='ilike', limit=100): return self.search((args or []) + [ @@ -110,8 +215,10 @@ class 
OverrideRights(models.Model):
             ('repository_id.name', operator, name)
         ], limit=limit).name_get()
 
-    def name_get(self):
-        return [
-            (r.id, f'{r.repository_id.name}: {r.context}' if r.repository_id else r.context)
-            for r in self
-        ]
+    @api.depends('repository_id.name', 'context')
+    def _compute_display_name(self):
+        for r in self:
+            if r.repository_id:
+                r.display_name = f'{r.repository_id.name}: {r.context}'
+            else:
+                r.display_name = r.context
diff --git a/runbot_merge/models/stagings_create.py b/runbot_merge/models/stagings_create.py
new file mode 100644
index 00000000..55955a0a
--- /dev/null
+++ b/runbot_merge/models/stagings_create.py
@@ -0,0 +1,716 @@
+import base64
+import contextlib
+import dataclasses
+import io
+import json
+import logging
+import os
+import re
+from collections.abc import Mapping
+from difflib import Differ
+from operator import itemgetter
+from typing import Dict, Union, Optional, Literal, Callable, Iterator, Tuple, List, TypeAlias
+
+from werkzeug.datastructures import Headers
+
+from odoo import api, models, fields, Command
+from odoo.tools import OrderedSet, groupby
+from .pull_requests import Branch, Stagings, PullRequests, Repository
+from .batch import Batch
+from .. import exceptions, utils, github, git
+
+WAIT_FOR_VISIBILITY = [10, 10, 10, 10]
+_logger = logging.getLogger(__name__)
+
+
+class Project(models.Model):
+    _inherit = 'runbot_merge.project'
+
+
+@dataclasses.dataclass(slots=True)
+class StagingSlice:
+    """Staging state for a single repository:
+
+    - gh is a cache for the github proxy object (contains a session for reusing
+      connection)
+    - head is the current staging head for the branch of that repo
+    - repo is the local working copy for the staging of that repo
+    """
+    gh: github.GH
+    head: str
+    repo: git.Repo
+
+
+StagingState: TypeAlias = Dict[Repository, StagingSlice]
+
+def try_staging(branch: Branch) -> Optional[Stagings]:
+    """ Tries to create a staging if the current branch does not already
+    have one. Returns None if the branch already has a staging or there
+    is nothing to stage; otherwise returns the newly created staging.
+    """
+    _logger.info(
+        "Checking %s (%s) for staging: %s, skip? %s",
+        branch, branch.name,
+        branch.active_staging_id,
+        bool(branch.active_staging_id)
+    )
+    if branch.active_staging_id:
+        return None
+
+    def log(label: str, batches: Batch) -> None:
+        _logger.info(label, ', '.join(batches.mapped('prs.display_name')))
+
+    alone, batches = ready_batches(for_branch=branch)
+
+    if alone:
+        log("staging high-priority PRs %s", batches)
+    elif branch.project_id.staging_priority == 'default':
+        if split := branch.split_ids[:1]:
+            batches = split.batch_ids
+            split.unlink()
+            log("staging split PRs %s (prioritising splits)", batches)
+        else:
+            # priority, normal; priority = sorted ahead of normal, so always picked
+            # first as long as there's room
+            log("staging ready PRs %s (prioritising splits)", batches)
+    elif branch.project_id.staging_priority == 'ready':
+        if batches:
+            log("staging ready PRs %s (prioritising ready)", batches)
+        else:
+            split = branch.split_ids[:1]
+            batches = split.batch_ids
+            split.unlink()
+            log("staging split PRs %s (prioritising ready)", batches)
+    else:
+        assert branch.project_id.staging_priority == 'largest'
+        maxsplit = max(branch.split_ids, key=lambda s: len(s.batch_ids), default=branch.env['runbot_merge.split'])
+        _logger.info("largest split = %d, ready = %d", len(maxsplit.batch_ids), len(batches))
+        # bias towards splits if len(ready) == len(batch_ids)
+        if len(maxsplit.batch_ids) >= len(batches):
+            batches = maxsplit.batch_ids
+            maxsplit.unlink()
+            log("staging split PRs %s (prioritising largest)", batches)
+        else:
+            log("staging ready PRs %s (prioritising largest)", batches)
+
+    if not batches:
+        return
+
+    original_heads, staging_state = staging_setup(branch, batches)
+
+    staged = stage_batches(branch, batches, staging_state)
+
+    if not staged:
+        return None
+
+    env = branch.env
+    heads = []
+    commits = []
+    for repo, it in staging_state.items():
+        if it.head == original_heads[repo] and branch.project_id.uniquifier:
+            # if we didn't stage anything for that repo and uniquification is
+            # enabled, create a dummy commit with a uniquifier so we don't hit
+            # a previous version of the same staging, ensuring the staging
+            # head is new and everything gets rebuilt
+            project = branch.project_id
+            uniquifier = base64.b64encode(os.urandom(12)).decode('ascii')
+            dummy_head = it.repo.with_config(check=True).commit_tree(
+                # somewhat exceptionally, `commit-tree` wants an actual tree
+                # not a tree-ish
+                tree=f'{it.head}^{{tree}}',
+                parents=[it.head],
+                author=(project.github_name, project.github_email),
+                message=f'''\
+force rebuild
+
+uniquifier: {uniquifier}
+For-Commit-Id: {it.head}
+''',
+            ).stdout.strip()
+
+            # see above, ideally we don't need to mark the real head as
+            # `to_check` because it's an old commit but `DO UPDATE` is necessary
+            # for `RETURNING` to work, and it doesn't really hurt (maybe)
+            env.cr.execute(
+                "INSERT INTO runbot_merge_commit (sha, to_check, statuses) "
+                "VALUES (%s, false, '{}'), (%s, true, '{}') "
+                "ON CONFLICT (sha) DO UPDATE SET to_check=true "
+                "RETURNING id",
+                [it.head, dummy_head]
+            )
+            ([commit], [head]) = env.cr.fetchall()
+            it.head = dummy_head
+        else:
+            # otherwise just create a record for that commit, or flag existing
+            # one as to-recheck in case there are already statuses we want to
+            # propagate to the staging or something
+            env.cr.execute(
+                "INSERT INTO runbot_merge_commit (sha, to_check, statuses) "
+                "VALUES (%s, true, '{}') "
+                "ON CONFLICT (sha) DO UPDATE SET to_check=true "
+                "RETURNING id",
+                [it.head]
+            )
+            [commit] = [head] = env.cr.fetchone()
+
+
heads.append(fields.Command.create({ + 'repository_id': repo.id, + 'commit_id': head, + })) + commits.append(fields.Command.create({ + 'repository_id': repo.id, + 'commit_id': commit, + })) + + # create actual staging object + st: Stagings = env['runbot_merge.stagings'].create({ + 'target': branch.id, + 'staging_batch_ids': [Command.create({'runbot_merge_batch_id': batch.id}) for batch in staged], + 'heads': heads, + 'commits': commits, + }) + for repo, it in staging_state.items(): + _logger.info( + "%s: create staging for %s:%s at %s", + branch.project_id.name, repo.name, branch.name, + it.head + ) + it.repo.stdout(False).check(True).push( + '-f', + git.source_url(repo), + f'{it.head}:refs/heads/staging.{branch.name}', + ) + + _logger.info("Created staging %s (%s) to %s", st, ', '.join( + '%s[%s]' % (batch, batch.prs) + for batch in staged + ), st.target.name) + return st + + +def ready_batches(for_branch: Branch) -> Tuple[bool, Batch]: + env = for_branch.env + # splits are ready by definition, we need to exclude them from the ready + # rows otherwise if a prioritised (alone) PR is part of a split it'll be + # staged through priority *and* through split. + split_ids = for_branch.split_ids.batch_ids.ids + env.cr.execute(""" + SELECT max(priority) + FROM runbot_merge_batch + WHERE blocked IS NULL AND target = %s AND NOT id = any(%s) + """, [for_branch.id, split_ids]) + alone = env.cr.fetchone()[0] == 'alone' + + return ( + alone, + env['runbot_merge.batch'].search([ + ('target', '=', for_branch.id), + ('blocked', '=', False), + ('priority', '=', 'alone') if alone else (1, '=', 1), + ('id', 'not in', split_ids), + ], order="priority DESC, id ASC"), + ) + + +def staging_setup( + target: Branch, + batches: Batch, +) -> Tuple[Dict[Repository, str], StagingState]: + """Sets up the staging: + + - stores baseline info + - creates tmp branch via gh API (to remove) + - generates working copy for each repository with the target branch + """ + by_repo: Mapping[Repository, List[PullRequests]] = \ + dict(groupby(batches.prs, lambda p: p.repository)) + + staging_state = {} + original_heads = {} + for repo in target.project_id.repo_ids.having_branch(target): + gh = repo.github() + head = gh.head(target.name) + + source = git.get_local(repo) + source.fetch( + git.source_url(repo), + # a full refspec is necessary to ensure we actually fetch the ref + # (not just the commit it points to) and update it. 
+ # `git fetch $remote $branch` seems to work locally, but it might + # be hooked only to "proper" remote-tracking branches + # (in `refs/remotes`), it doesn't seem to work here + f'+refs/heads/{target.name}:refs/heads/{target.name}', + *(pr.head for pr in by_repo.get(repo, [])) + ) + original_heads[repo] = head + staging_state[repo] = StagingSlice(gh=gh, head=head, repo=source.stdout().with_config(text=True, check=False)) + + return original_heads, staging_state + + +def stage_batches(branch: Branch, batches: Batch, staging_state: StagingState) -> Stagings: + batch_limit = branch.project_id.batch_limit + env = branch.env + staged = env['runbot_merge.batch'] + for batch in batches: + if len(staged) >= batch_limit: + break + try: + staged |= stage_batch(env, batch, staging_state) + except exceptions.MergeError as e: + pr = e.args[0] + _logger.info("Failed to stage %s into %s", pr.display_name, branch.name) + pr._message_log(body=f"Failed to stage into {branch.name}: {e}") + if not staged or isinstance(e, exceptions.Unmergeable): + if len(e.args) > 1 and e.args[1]: + reason = e.args[1] + else: + reason = e.__cause__ or e.__context__ + # if the reason is a json document, assume it's a github error + # and try to extract the error message to give it to the user + with contextlib.suppress(Exception): + reason = json.loads(str(reason))['message'].lower() + + pr.error = True + env.ref('runbot_merge.pr.merge.failed')._send( + repository=pr.repository, + pull_request=pr.number, + format_args={'pr': pr, 'reason': reason, 'exc': e}, + ) + return staged + + +refline = re.compile(rb'([\da-f]{40}) ([^\0\n]+)(\0.*)?\n?') +ZERO_REF = b'0'*40 + +def parse_refs_smart(read: Callable[[int], bytes]) -> Iterator[Tuple[str, str]]: + """ yields pkt-line data (bytes), or None for flush lines """ + def read_line() -> Optional[bytes]: + length = int(read(4), 16) + if length == 0: + return None + return read(length - 4) + + header = read_line() + assert header and header.rstrip() == b'# service=git-upload-pack', header + assert read_line() is None, "failed to find first flush line" + # read lines until second delimiter + for line in iter(read_line, None): + if line.startswith(ZERO_REF): + break # empty list (no refs) + m = refline.fullmatch(line) + assert m + yield m[1].decode(), m[2].decode() + + +UNCHECKABLE = ['merge_method', 'overrides', 'draft'] + + +def stage_batch(env: api.Environment, batch: Batch, staging: StagingState): + """Stages the batch represented by the ``prs`` recordset, onto the + current corresponding staging heads. + + Alongside returning the newly created batch, updates ``staging[*].head`` + in-place on success. On failure, the heads should not be touched. + + May return an empty recordset on some non-fatal failures. 
+ """ + new_heads: Dict[PullRequests, str] = {} + pr_fields = env['runbot_merge.pull_requests']._fields + for pr in batch.prs: + info = staging[pr.repository] + _logger.info( + "Staging pr %s for target %s; method=%s", + pr.display_name, pr.target.name, + pr.merge_method or (pr.squash and 'single') or None + ) + + try: + method, new_heads[pr] = stage(pr, info, related_prs=(batch.prs - pr)) + _logger.info( + "Staged pr %s to %s by %s: %s -> %s", + pr.display_name, pr.target.name, method, + info.head, new_heads[pr] + ) + except github.MergeError as e: + raise exceptions.MergeError(pr) from e + except exceptions.Mismatch as e: + diff = ''.join(Differ().compare( + list(format_for_difflib((n, v) for n, v, _ in e.args[1])), + list(format_for_difflib((n, v) for n, _, v in e.args[1])), + )) + _logger.info("Failed to stage %s: data mismatch", pr.display_name) + pr._message_log(body=f"data mismatch before merge:\n{diff}") + env.ref('runbot_merge.pr.staging.mismatch')._send( + repository=pr.repository, + pull_request=pr.number, + format_args={ + 'pr': pr, + 'mismatch': ', '.join(pr_fields[f].string for f in e.args[0]), + 'diff': diff, + 'unchecked': ', '.join(pr_fields[f].string for f in UNCHECKABLE) + } + ) + return env['runbot_merge.batch'] + + # update meta to new heads + for pr, head in new_heads.items(): + staging[pr.repository].head = head + return batch + +def format_for_difflib(items: Iterator[Tuple[str, object]]) -> Iterator[str]: + """ Bit of a pain in the ass because difflib really wants + all lines to be newline-terminated, but not all values are + actual lines, and also needs to split multiline values. + """ + for name, value in items: + yield name + ':\n' + value = str(value) + if not value.endswith('\n'): + value += '\n' + yield from value.splitlines(keepends=True) + yield '\n' + + +Method = Literal['merge', 'rebase-merge', 'rebase-ff', 'squash'] +def stage(pr: PullRequests, info: StagingSlice, related_prs: PullRequests) -> Tuple[Method, str]: + # nb: pr_commits is oldest to newest so pr.head is pr_commits[-1] + _, prdict = info.gh.pr(pr.number) + commits = prdict['commits'] + method: Method = pr.merge_method or ('rebase-ff' if commits == 1 else None) + if commits > 50 and method.startswith('rebase'): + raise exceptions.Unmergeable(pr, "Rebasing 50 commits is too much.") + if commits > 250: + raise exceptions.Unmergeable( + pr, "Merging PRs of 250 or more commits is not supported " + "(https://developer.github.com/v3/pulls/#list-commits-on-a-pull-request)" + ) + pr_commits = info.gh.commits(pr.number) + for c in pr_commits: + if not (c['commit']['author']['email'] and c['commit']['committer']['email']): + raise exceptions.Unmergeable( + pr, + f"All commits must have author and committer email, " + f"missing email on {c['sha']} indicates the authorship is " + f"most likely incorrect." 
+ ) + + # sync and signal possibly missed updates + invalid = {} + diff = [] + pr_head = pr_commits[-1]['sha'] + if pr.head != pr_head: + invalid['head'] = pr_head + diff.append(('Head', pr.head, pr_head)) + + if pr.target.name != prdict['base']['ref']: + branch = pr.env['runbot_merge.branch'].with_context(active_test=False).search([ + ('name', '=', prdict['base']['ref']), + ('project_id', '=', pr.repository.project_id.id), + ]) + if not branch: + pr.unlink() + raise exceptions.Unmergeable(pr, "While staging, found this PR had been retargeted to an un-managed branch.") + invalid['target'] = branch.id + diff.append(('Target branch', pr.target.name, branch.name)) + + if pr.squash != commits == 1: + invalid['squash'] = commits == 1 + diff.append(('Single commit', pr.squash, commits == 1)) + + msg = utils.make_message(prdict) + if pr.message != msg: + invalid['message'] = msg + diff.append(('Message', pr.message, msg)) + + if invalid: + pr.write({**invalid, 'reviewed_by': False, 'head': pr_head}) + raise exceptions.Mismatch(invalid, diff) + + if pr.reviewed_by and pr.reviewed_by.name == pr.reviewed_by.github_login: + # XXX: find other trigger(s) to sync github name? + gh_name = info.gh.user(pr.reviewed_by.github_login)['name'] + if gh_name: + pr.reviewed_by.name = gh_name + + match method: + case 'merge': + fn = stage_merge + case 'rebase-merge': + fn = stage_rebase_merge + case 'rebase-ff': + fn = stage_rebase_ff + case 'squash': + fn = stage_squash + + pr_base_tree = info.repo.get_tree(pr_commits[0]['parents'][0]['sha']) + pr_head_tree = pr_commits[-1]['commit']['tree']['sha'] + + merge_base_tree = info.repo.get_tree(info.head) + new_head = fn(pr, info, pr_commits, related_prs=related_prs) + merge_head_tree = info.repo.get_tree(new_head) + + if pr_head_tree != pr_base_tree and merge_head_tree == merge_base_tree: + raise exceptions.MergeError(pr, f'results in an empty tree when merged, might be the duplicate of a merged PR.') + + return method, new_head + +def stage_squash(pr: PullRequests, info: StagingSlice, commits: List[github.PrCommit], related_prs: PullRequests) -> str: + msg = pr._build_message(pr, related_prs=related_prs) + + authors = { + (c['commit']['author']['name'], c['commit']['author']['email']) + for c in commits + } + if len(authors) == 1: + author = authors.pop() + else: + msg.headers.extend(sorted( + ('Co-Authored-By', "%s <%s>" % author) + for author in authors + )) + author = (pr.repository.project_id.github_name, pr.repository.project_id.github_email) + + committers = { + (c['commit']['committer']['name'], c['commit']['committer']['email']) + for c in commits + } + # should committers also be added to co-authors? 
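To illustrate the authorship folding above, a self-contained sketch (identities made up; the real code reads GitHub commit payloads): a single distinct author is kept as-is, otherwise the project identity signs the squash and every original author becomes a Co-Authored-By trailer.

def fold_authors(commit_authors, project_identity):
    # commit_authors: iterable of (name, email) pairs, one per squashed commit
    authors = set(commit_authors)
    if len(authors) == 1:
        return authors.pop(), []  # single author, no trailers needed
    trailers = [
        ('Co-Authored-By', '%s <%s>' % author)
        for author in sorted(authors)
    ]
    return project_identity, trailers

author, trailers = fold_authors(
    [('Alice', 'alice@example.com'), ('Bob', 'bob@example.com')],
    ('mergebot', 'mergebot@example.com'),
)
assert author == ('mergebot', 'mergebot@example.com')
assert trailers == [
    ('Co-Authored-By', 'Alice <alice@example.com>'),
    ('Co-Authored-By', 'Bob <bob@example.com>'),
]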
+ committer = committers.pop() if len(committers) == 1 else None + + r = info.repo.merge_tree(info.head, pr.head) + if r.returncode: + raise exceptions.MergeError(pr, r.stderr) + merge_tree = r.stdout.strip() + + r = info.repo.commit_tree( + tree=merge_tree, + parents=[info.head], + message=str(msg), + author=author, + committer=committer or author, + ) + if r.returncode: + raise exceptions.MergeError(pr, r.stderr) + head = r.stdout.strip() + + commits_map = {c['sha']: head for c in commits} + commits_map[''] = head + pr.commits_map = json.dumps(commits_map) + + return head + +def stage_rebase_ff(pr: PullRequests, info: StagingSlice, commits: List[github.PrCommit], related_prs: PullRequests) -> str: + add_self_references(pr, commits, related_prs=related_prs, merge=commits[-1]) + + _logger.debug("rebasing %s on %s (commits=%s)", + pr.display_name, info.head, len(commits)) + head, mapping = info.repo.rebase(info.head, commits=commits) + pr.commits_map = json.dumps({**mapping, '': head}) + return head + +def stage_rebase_merge(pr: PullRequests, info: StagingSlice, commits: List[github.PrCommit], related_prs: PullRequests) -> str : + add_self_references(pr, commits, related_prs=related_prs) + _logger.debug("rebasing %s on %s (commits=%s)", + pr.display_name, info.head, len(commits)) + h, mapping = info.repo.rebase(info.head, commits=commits) + msg = pr._build_message(pr, related_prs=related_prs) + + project = pr.repository.project_id + merge_head= info.repo.merge( + info.head, h, str(msg), + author=(project.github_name, project.github_email), + ) + pr.commits_map = json.dumps({**mapping, '': merge_head}) + return merge_head + +def stage_merge(pr: PullRequests, info: StagingSlice, commits: List[github.PrCommit], related_prs: PullRequests) -> str: + pr_head = commits[-1] # oldest to newest + base_commit = None + head_parents = {p['sha'] for p in pr_head['parents']} + if len(head_parents) > 1: + # look for parent(s?) 
of pr_head not in PR, means it's + # from target (so we merged target in pr) + merge = head_parents - {c['sha'] for c in commits} + external_parents = len(merge) + if external_parents > 1: + raise exceptions.Unmergeable( + "The PR head can only have one parent from the base branch " + "(not part of the PR itself), found %d: %s" % ( + external_parents, + ', '.join(merge) + )) + if external_parents == 1: + [base_commit] = merge + + commits_map = {c['sha']: c['sha'] for c in commits} + if base_commit: + # replicate pr_head with base_commit replaced by + # the current head + t = info.repo.merge_tree(info.head, pr_head['sha']) + if t.returncode: + raise exceptions.MergeError(pr, t.stderr) + merge_tree = t.stdout.strip() + new_parents = [info.head] + list(head_parents - {base_commit}) + msg = pr._build_message(pr_head['commit']['message'], related_prs=related_prs) + + d2t = itemgetter('name', 'email', 'date') + c = info.repo.commit_tree( + tree=merge_tree, + parents=new_parents, + message=str(msg), + author=d2t(pr_head['commit']['author']), + committer=d2t(pr_head['commit']['committer']), + ) + if c.returncode: + raise exceptions.MergeError(pr, c.stderr) + copy = c.stdout.strip() + + # merge commit *and old PR head* map to the pr head replica + commits_map[''] = commits_map[pr_head['sha']] = copy + pr.commits_map = json.dumps(commits_map) + return copy + else: + # otherwise do a regular merge + msg = pr._build_message(pr) + project = pr.repository.project_id + merge_head = info.repo.merge( + info.head, pr.head, str(msg), + author=(project.github_name, project.github_email), + ) + # and the merge commit is the normal merge head + commits_map[''] = merge_head + pr.commits_map = json.dumps(commits_map) + return merge_head + +def is_mentioned(message: Union[PullRequests, str], pr: PullRequests, *, full_reference: bool = False) -> bool: + """Returns whether ``pr`` is mentioned in ``message``` + """ + if full_reference: + pattern = fr'\b{re.escape(pr.display_name)}\b' + else: + repository = pr.repository.name # .replace('/', '\\/') + pattern = fr'( |\b{repository})#{pr.number}\b' + return bool(re.search(pattern, message if isinstance(message, str) else message.message)) + +def add_self_references( + pr: PullRequests, + commits: List[github.PrCommit], + related_prs: PullRequests, + merge: Optional[github.PrCommit] = None, +): + """Adds a footer reference to ``self`` to all ``commits`` if they don't + already refer to the PR. + """ + for c in (c['commit'] for c in commits): + c['message'] = str(pr._build_message( + c['message'], + related_prs=related_prs, + merge=merge and c['url'] == merge['commit']['url'], + )) + +BREAK = re.compile(r''' + [ ]{0,3} # 0-3 spaces of indentation + # followed by a sequence of three or more matching -, _, or * characters, + # each followed optionally by any number of spaces or tabs + # so needs to start with a _, - or *, then have at least 2 more such + # interspersed with any number of spaces or tabs + ([*_-]) + ([ \t]*\1){2,} + [ \t]* +''', flags=re.VERBOSE) +SETEX_UNDERLINE = re.compile(r''' + [ ]{0,3} # no more than 3 spaces indentation + [-=]+ # a sequence of = characters or a sequence of - characters + [ ]* # any number of trailing spaces + # we don't care about "a line containing a single -" because we want to + # disambiguate SETEX headings from thematic breaks, and thematic breaks have + # 3+ -. Doesn't look like GH interprets `- - -` as a line so yay... 
+''', flags=re.VERBOSE) +HEADER = re.compile('([A-Za-z-]+): (.*)') +class Message: + @classmethod + def from_message(cls, msg: Union[PullRequests, str]) -> 'Message': + in_headers = True + maybe_setex = None + # creating from PR message -> remove content following break + if isinstance(msg, str): + message, handle_break = (msg, False) + else: + message, handle_break = (msg.message, True) + headers = [] + body: List[str] = [] + # don't process the title (first line) of the commit message + lines = message.splitlines() + for line in reversed(lines[1:]): + if maybe_setex: + # NOTE: actually slightly more complicated: it's a SETEX heading + # only if preceding line(s) can be interpreted as a + # paragraph so e.g. a title followed by a line of dashes + # would indeed be a break, but this should be good enough + # for now, if we need more we'll need a full-blown + # markdown parser probably + if line: # actually a SETEX title -> add underline to body then process current + body.append(maybe_setex) + else: # actually break, remove body then process current + body = [] + maybe_setex = None + + if not line: + if not in_headers and body and body[-1]: + body.append(line) + continue + + if handle_break and BREAK.fullmatch(line): + if SETEX_UNDERLINE.fullmatch(line): + maybe_setex = line + else: + body = [] + continue + + h = HEADER.fullmatch(line) + if h: + # c-a-b = special case from an existing test, not sure if actually useful? + if in_headers or h[1].lower() == 'co-authored-by': + headers.append(h.groups()) + continue + + body.append(line) + in_headers = False + + # if there are non-title body lines, add a separation after the title + if body and body[-1]: + body.append('') + body.append(lines[0]) + return cls('\n'.join(reversed(body)), Headers(reversed(headers))) + + def __init__(self, body: str, headers: Optional[Headers] = None): + self.body = body + self.headers = headers or Headers() + + def __setattr__(self, name, value): + # make sure stored body is always stripped + if name == 'body': + value = value and value.strip() + super().__setattr__(name, value) + + def __str__(self): + if not self.headers: + return self.body.rstrip() + '\n' + + with io.StringIO() as msg: + msg.write(self.body.rstrip()) + msg.write('\n\n') + # https://git.wiki.kernel.org/index.php/CommitMessageConventions + # seems to mostly use capitalised names (rather than title-cased) + keys = list(OrderedSet(k.capitalize() for k in self.headers.keys())) + # c-a-b must be at the very end otherwise github doesn't see it + keys.sort(key=lambda k: k == 'Co-authored-by') + for k in keys: + for v in self.headers.getlist(k): + msg.write(k) + msg.write(': ') + msg.write(v) + msg.write('\n') + + return msg.getvalue() diff --git a/runbot_merge/models/utils.py b/runbot_merge/models/utils.py new file mode 100644 index 00000000..a66fe0b4 --- /dev/null +++ b/runbot_merge/models/utils.py @@ -0,0 +1,201 @@ +import logging +from contextvars import ContextVar +from typing import Tuple +from xml.etree.ElementTree import Element, tostring + +import markdown.inlinepatterns +import markdown.treeprocessors +from markupsafe import escape, Markup + + +def enum(model: str, field: str) -> Tuple[str, str]: + n = f'{model.replace(".", "_")}_{field}_type' + return n, n + + +def readonly(_): + raise TypeError("Field is readonly") + + +DFM_CONTEXT_REPO = ContextVar("dfm_context", default="") +def dfm(repository: str, text: str) -> Markup: + """ Converts the input text from markup to HTML using the Odoo PR + Description Rules, which are basically: + + - GFM + 
- minus raw HTML (?) + - + github's autolinking (https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/autolinked-references-and-urls) + - + bespoke autolinking of OPW and Task links to odoo.com + """ + t = DFM_CONTEXT_REPO.set(repository) + try: + return Markup(dfm_renderer.convert(escape(text))) + finally: + DFM_CONTEXT_REPO.reset(t) + + +class DfmExtension(markdown.extensions.Extension): + def extendMarkdown(self, md): + md.registerExtensions(['fenced_code', 'footnotes', 'nl2br', 'sane_lists', 'tables'], configs={}) + md.inlinePatterns.register(GithubLinking(md), 'githublinking', 123) + md.inlinePatterns.register(OdooLinking(md), 'odoolinking', 124) + # ideally the unlinker should run before the prettifier so the + # prettification is done correctly, but it seems unlikely the prettifier + # handles the variable nature of links correctly, and we likely want to + # run after the unescaper + md.treeprocessors.register(Unlinker(), "unlinker", -10) + +class GithubLinking(markdown.inlinepatterns.InlineProcessor): + """Aside from being *very* varied github links are *contextual*. That is, + their resolution depends on the repository they're being called from + (technically they also need all the information from the github backend to + know the people & objects exist but we don't have that option). + + Context is not available to us, but we can fake it through the application + of contextvars: ``DFM_CONTEXT_REPO`` should contain the full name of the + repository this is being resolved from. + + If ``DFM_CONTEXT_REPO`` is empty and needed, this processor emits a warning. + """ + def __init__(self, md=None): + super().__init__(r"""(?xi) +(?: + \bhttps://github.com/([\w\.-]+/[\w\.-]+)/(?:issues|pull)/(\d+)(\#[\w-]+)? +| \bhttps://github.com/([\w\.-]+/[\w\.-]+)/commit/([a-f0-9]+) +| \b([\w\.-]+/[\w\.-]+)\#(\d+) +| (\bGH-|(?:^|(?<=\s))\#)(\d+) +| \b(?: + # user@sha or user/repo@sha + ([\w\.-]+(?:/[\w\.-]+)?) + @ + ([0-9a-f]{7,40}) + ) +| \b( + # a sha is 7~40 hex digits but that means any million+ number matches + # which is probably wrong. 
So ensure there's at least one letter in the + # set by using a positive lookahead which looks for a sequence of at + # least 0 numbers followed by a-f + (?=[0-9]{0,39}?[a-f]) + [0-9a-f]{7,40} + ) +) +\b +""", md) + + def handleMatch(self, m, data): + ctx = DFM_CONTEXT_REPO.get() + if not ctx: + logging.getLogger(__name__)\ + .getChild("github_links")\ + .warning("missing context for rewriting github links, skipping") + return m[0], *m.span() + + repo = issue = commit = None + if m[2]: # full issue / PR + repo = m[1] + issue = m[2] + elif m[5]: # long hash + repo = m[4] + commit = m[5] + elif m[7]: # short issue with repo + repo = m[6] + issue = m[7] + elif m[9]: # short issue without repo + repo = None if m[8] == '#' else "GH" + issue = m[9] + elif m[11]: # medium hash + repo = m[10] + commit = m[11] + else: # hash only + commit = m[12] + + el = Element("a") + if issue is not None: + if repo == "GH": + el.text = f"GH-{issue}" + repo = ctx + elif repo in (None, ctx): + repo = ctx + el.text = f"#{issue}" + else: + el.text = f"{repo}#{issue}" + + if (fragment := m[3]) and fragment.startswith('#issuecomment-'): + el.text += ' (comment)' + else: + fragment = '' + el.set('href', f"https://github.com/{repo}/issues/{issue}{fragment}") + else: + if repo in (None, ctx): + label_repo = "" + repo = ctx + elif '/' not in repo: # owner-only + label_repo = repo + # NOTE: I assume in reality we're supposed to find the actual fork if unambiguous... + repo = repo + '/' + ctx.split('/')[-1] + elif repo.split('/')[-1] == ctx.split('/')[-1]: + # NOTE: here we assume if it's the same repo in a different owner it's a fork + label_repo = repo.split('/')[0] + else: + label_repo = repo + el.text = f"{label_repo}@{commit}" if label_repo else commit + el.set("href", f"https://github.com/{repo}/commit/{commit}") + return el, *m.span() + + +class OdooLinking(markdown.inlinepatterns.InlineProcessor): + def __init__(self, md=None): + # there are other weirder variations but fuck em, this matches + # "opw", "task", "task-id" or "taskid" followed by an optional - or : + # followed by digits + super().__init__(r"(?i)\b(task(?:-?id)?|opw)\s*[-:]?\s*(\d+)\b", md) + + def handleMatch(self, m, data): + el = Element("a", href='https://www.odoo.com/web#model=project.task&id=' + m[2]) + if m[1].lower() == 'opw': + el.text = f"opw-{m[2]}" + else: + el.text = f"task-{m[2]}" + return el, *m.span() + + +class Unlinker(markdown.treeprocessors.Treeprocessor): + def run(self, root): + # find all elements which contain a link, as ElementTree does not have + # parent links we can't really replace links in place + for parent in root.iterfind('.//*[a]'): + children = parent[:] + # can't use clear because that clears the attributes and tail/text + del parent[:] + for el in children: + if el.tag != 'a' or el.get('href', '').startswith(('https:', 'http:')): + parent.append(el) + continue + + # this is a weird link, remove it + + if el.text: # first attach its text to the previous element + if len(parent): # prev is not parent + parent[-1].tail = (parent[-1].tail or '') + el.text + else: + parent.text = (parent.text or '') + el.text + + if len(el): # then unpack all its children + parent.extend(el[:]) + + if el.tail: # then attach tail to previous element + if len(parent): # prev is not parent + parent[-1].tail = (parent[-1].tail or '') + el.tail + else: + parent.text = (parent.text or '') + el.tail + + return None + + +# alternatively, use cmarkgfm? 
The maintainer of py-gfm (impl'd over +# python-markdown) ultimately gave up, if apparently mostly due to pymarkdown's +# tendency to break its API all the time +dfm_renderer = markdown.Markdown( + extensions=[DfmExtension()], + output_format='html5', +) diff --git a/runbot_merge/security/ir.model.access.csv b/runbot_merge/security/ir.model.access.csv index a6cb79ba..76bbf09d 100644 --- a/runbot_merge/security/ir.model.access.csv +++ b/runbot_merge/security/ir.model.access.csv @@ -9,15 +9,20 @@ access_runbot_merge_repository_status_admin,Admin access to repo statuses,model_ access_runbot_merge_branch_admin,Admin access to branches,model_runbot_merge_branch,runbot_merge.group_admin,1,1,1,1 access_runbot_merge_pull_requests_admin,Admin access to PR,model_runbot_merge_pull_requests,runbot_merge.group_admin,1,1,1,1 access_runbot_merge_pull_requests_tagging_admin,Admin access to tagging,model_runbot_merge_pull_requests_tagging,runbot_merge.group_admin,1,1,1,1 +access_runbot_merge_pull_requests_split_admin,Admin access to batch split wizard,model_runbot_merge_pull_requests_split_off,runbot_merge.group_admin,1,1,1,1 access_runbot_merge_commit_admin,Admin access to commits,model_runbot_merge_commit,runbot_merge.group_admin,1,1,1,1 access_runbot_merge_stagings_admin,Admin access to stagings,model_runbot_merge_stagings,runbot_merge.group_admin,1,1,1,1 +access_runbot_merge_stagings_heads_admin,Admin access to staging heads,model_runbot_merge_stagings_heads,runbot_merge.group_admin,1,1,1,1 +access_runbot_merge_stagings_commits_admin,Admin access to staging commits,model_runbot_merge_stagings_commits,runbot_merge.group_admin,1,1,1,1 access_runbot_merge_stagings_cancel_admin,Admin access to cancelling stagings,model_runbot_merge_stagings_cancel,runbot_merge.group_admin,1,1,1,1 access_runbot_merge_split_admin,Admin access to splits,model_runbot_merge_split,runbot_merge.group_admin,1,1,1,1 access_runbot_merge_batch_admin,Admin access to batches,model_runbot_merge_batch,runbot_merge.group_admin,1,1,1,1 +access_runbot_merge_staging_batch_admin,Admin access to batch/staging link,model_runbot_merge_staging_batch,runbot_merge.group_admin,1,1,1,1 access_runbot_merge_fetch_job_admin,Admin access to fetch jobs,model_runbot_merge_fetch_job,runbot_merge.group_admin,1,1,1,1 access_runbot_merge_pull_requests_feedback_admin,Admin access to feedback,model_runbot_merge_pull_requests_feedback,runbot_merge.group_admin,1,1,1,1 access_runbot_merge_review_rights,Admin access to review permissions,model_res_partner_review,runbot_merge.group_admin,1,1,1,1 access_runbot_merge_review_override,Admin access to override permissions,model_res_partner_override,runbot_merge.group_admin,1,1,1,1 +access_runbot_merge_events_sources,Admin access to event sources,model_runbot_merge_events_sources,runbot_merge.group_admin,1,1,1,1 access_runbot_merge_project,User access to project,model_runbot_merge_project,base.group_user,1,0,0,0 access_runbot_merge_repository,User access to repo,model_runbot_merge_repository,base.group_user,1,0,0,0 access_runbot_merge_branch,User access to branches,model_runbot_merge_branch,base.group_user,1,0,0,0 @@ -25,3 +30,5 @@ access_runbot_merge_pull_requests,User access to PR,model_runbot_merge_pull_requ access_runbot_merge_pull_requests_feedback,Users have no reason to access feedback,model_runbot_merge_pull_requests_feedback,,0,0,0,0 access_runbot_merge_review_rights_2,Users can see partners,model_res_partner_review,base.group_user,1,0,0,0 access_runbot_merge_review_override_2,Users can see 
diff --git a/runbot_merge/security/ir.model.access.csv b/runbot_merge/security/ir.model.access.csv
index a6cb79ba..76bbf09d 100644
--- a/runbot_merge/security/ir.model.access.csv
+++ b/runbot_merge/security/ir.model.access.csv
@@ -9,15 +9,20 @@ access_runbot_merge_repository_status_admin,Admin access to repo statuses,model_
 access_runbot_merge_branch_admin,Admin access to branches,model_runbot_merge_branch,runbot_merge.group_admin,1,1,1,1
 access_runbot_merge_pull_requests_admin,Admin access to PR,model_runbot_merge_pull_requests,runbot_merge.group_admin,1,1,1,1
 access_runbot_merge_pull_requests_tagging_admin,Admin access to tagging,model_runbot_merge_pull_requests_tagging,runbot_merge.group_admin,1,1,1,1
+access_runbot_merge_pull_requests_split_admin,Admin access to batch split wizard,model_runbot_merge_pull_requests_split_off,runbot_merge.group_admin,1,1,1,1
 access_runbot_merge_commit_admin,Admin access to commits,model_runbot_merge_commit,runbot_merge.group_admin,1,1,1,1
 access_runbot_merge_stagings_admin,Admin access to stagings,model_runbot_merge_stagings,runbot_merge.group_admin,1,1,1,1
+access_runbot_merge_stagings_heads_admin,Admin access to staging heads,model_runbot_merge_stagings_heads,runbot_merge.group_admin,1,1,1,1
+access_runbot_merge_stagings_commits_admin,Admin access to staging commits,model_runbot_merge_stagings_commits,runbot_merge.group_admin,1,1,1,1
 access_runbot_merge_stagings_cancel_admin,Admin access to cancelling stagings,model_runbot_merge_stagings_cancel,runbot_merge.group_admin,1,1,1,1
 access_runbot_merge_split_admin,Admin access to splits,model_runbot_merge_split,runbot_merge.group_admin,1,1,1,1
 access_runbot_merge_batch_admin,Admin access to batches,model_runbot_merge_batch,runbot_merge.group_admin,1,1,1,1
+access_runbot_merge_staging_batch_admin,Admin access to batch/staging link,model_runbot_merge_staging_batch,runbot_merge.group_admin,1,1,1,1
 access_runbot_merge_fetch_job_admin,Admin access to fetch jobs,model_runbot_merge_fetch_job,runbot_merge.group_admin,1,1,1,1
 access_runbot_merge_pull_requests_feedback_admin,Admin access to feedback,model_runbot_merge_pull_requests_feedback,runbot_merge.group_admin,1,1,1,1
 access_runbot_merge_review_rights,Admin access to review permissions,model_res_partner_review,runbot_merge.group_admin,1,1,1,1
 access_runbot_merge_review_override,Admin access to override permissions,model_res_partner_override,runbot_merge.group_admin,1,1,1,1
+access_runbot_merge_events_sources,Admin access to event sources,model_runbot_merge_events_sources,runbot_merge.group_admin,1,1,1,1
 access_runbot_merge_project,User access to project,model_runbot_merge_project,base.group_user,1,0,0,0
 access_runbot_merge_repository,User access to repo,model_runbot_merge_repository,base.group_user,1,0,0,0
 access_runbot_merge_branch,User access to branches,model_runbot_merge_branch,base.group_user,1,0,0,0
@@ -25,3 +30,5 @@ access_runbot_merge_pull_requests,User access to PR,model_runbot_merge_pull_requ
 access_runbot_merge_pull_requests_feedback,Users have no reason to access feedback,model_runbot_merge_pull_requests_feedback,,0,0,0,0
 access_runbot_merge_review_rights_2,Users can see partners,model_res_partner_review,base.group_user,1,0,0,0
 access_runbot_merge_review_override_2,Users can see partners,model_res_partner_override,base.group_user,1,0,0,0
+runbot_merge.access_runbot_merge_pull_requests_feedback_template,access_runbot_merge_pull_requests_feedback_template,runbot_merge.model_runbot_merge_pull_requests_feedback_template,base.group_system,1,1,0,0
+
diff --git a/runbot_merge/security/security.xml b/runbot_merge/security/security.xml
index 62e1f323..d1ea1c79 100644
--- a/runbot_merge/security/security.xml
+++ b/runbot_merge/security/security.xml
@@ -5,4 +5,7 @@
     <record model="res.groups" id="base.group_system">
         <field name="implied_ids" eval="[(4, ref('runbot_merge.group_admin'))]"/>
     </record>
+    <record model="res.groups" id="status">
+        <field name="name">Mergebot Status Sender</field>
+    </record>
 </odoo>
diff --git a/runbot_merge/sentry.py b/runbot_merge/sentry.py
new file mode 100644
index 00000000..0bf71a61
--- /dev/null
+++ b/runbot_merge/sentry.py
@@ -0,0 +1,117 @@
+import logging
+from os import environ
+
+import sentry_sdk
+from sentry_sdk.integrations.logging import LoggingIntegration
+from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
+
+from odoo import http
+from odoo.addons.base.models.ir_cron import ir_cron
+from odoo.http import HttpDispatcher, JsonRPCDispatcher
+
+from .exceptions import FastForwardError, MergeError, Unmergeable
+
+
+def delegate(self, attr):
+    return getattr(self.app, attr)
+SentryWsgiMiddleware.__getattr__ = delegate
+
+def enable_sentry():
+    logger = logging.getLogger('runbot_merge')
+
+    dsn = environ.get('SENTRY_DSN')
+    if not dsn:
+        logger.info("No DSN found, skipping sentry...")
+        return
+
+    try:
+        setup_sentry(dsn)
+    except Exception:
+        logger.exception("DSN found, failed to enable sentry...")
+    else:
+        logger.info("DSN found, sentry enabled...")
+
+
+def setup_sentry(dsn):
+    sentry_sdk.init(
+        dsn,
+        auto_session_tracking=False,
+        # traces_sample_rate=1.0,
+        integrations=[
+            # note: if the colorformatter is enabled, sentry gets lost
+            # and classifies everything as errors because it fails to
+            # properly classify levels as the colorformatter injects
+            # the ANSI color codes right into LogRecord.levelname
+            LoggingIntegration(level=logging.INFO, event_level=logging.WARNING),
+        ],
+        before_send=event_filter,
+        # apparently not in my version of the sdk
+        # functions_to_trace = []
+    )
+    http.root = SentryWsgiMiddleware(http.root)
+    instrument_odoo()
+
+def instrument_odoo():
+    """Monkeypatches odoo core to copy odoo metadata into sentry for more
+    informative events
+    """
+    # add user to wsgi request context
+    for d in [HttpDispatcher, JsonRPCDispatcher]:
+        def dispatch(self, endpoint, args, old_dispatch=d.dispatch):
+            if self.request.uid:
+                sentry_sdk.set_user({
+                    'id': self.request.uid,
+                    'email': self.request.env.user.email,
+                    'username': self.request.env.user.login,
+                })
+            else:
+                sentry_sdk.set_user({'username': '<public>'})
+            return old_dispatch(self, endpoint, args)
+        d.dispatch = dispatch
+
+    # create transaction for tracking crons, add user to that
+    old_callback = ir_cron._callback
+    def _callback(self, cron_name, server_action_id, job_id):
+        sentry_sdk.start_transaction(name=f"cron {cron_name}")
+        sentry_sdk.set_user({
+            'id': self.env.user.id,
+            'email': self.env.user.email,
+            'username': self.env.user.login,
+        })
+        return old_callback(self, cron_name, server_action_id, job_id)
+    ir_cron._callback = _callback
+
+dummy_record = logging.LogRecord(name="", level=logging.NOTSET, pathname='', lineno=0, msg='', args=(), exc_info=None)
+# mapping of exception types to predicates: if the predicate returns `True`,
+# the exception event should be suppressed
+SUPPRESS_EXCEPTION = {
+    # Someone else deciding to push directly to the branch (which is generally
+    # what leads to this error) is not really actionable.
+    #
+    # Other possibilities are more structural and thus we probably want to know:
+    # - other 422 Unprocessable github errors (likely config issues):
+    #   - reference does not exist
+    #   - object does not exist
+    #   - object is not a commit
+    #   - branch protection issue
+    # - timeout on ref update (github probably dying)
+    # - other HTTP error (also github probably dying)
+    #
+    # might be worth using richer exceptions to make this clearer, and easier to classify
+    FastForwardError: lambda e: 'not a fast forward' in str(e.__cause__),
+    # Git conflict when merging (or non-json response which is weird),
+    # notified on PR
+    MergeError: lambda _: True,
+    # Failed preconditions on merging, notified on PR
+    Unmergeable: lambda _: True,
+}
+def event_filter(event, hint):
+    # event['level'], event['logger'], event['logentry'], event['exception']
+    # known hints: log_record: LogRecord, exc_info: (type, BaseException, Traceback) | None
+    exc_info = hint.get('exc_info') or hint.get('log_record', dummy_record).exc_info
+    if exc_info:
+        etype, exc, _ = exc_info
+        if SUPPRESS_EXCEPTION.get(etype, lambda _: False)(exc):
+            return None
+    # `before_send` must return the event, otherwise the event is dropped
+    return event
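
Worth noting for reviewers: in sentry-python, a `before_send` hook keeps an event by returning it and drops it by returning `None`, which is why the filter above has to fall through to `return event`. A minimal standalone sketch of the same pattern (illustrative, independent of this module; the DSN is a placeholder):

    import sentry_sdk

    def before_send(event, hint):
        exc_info = hint.get('exc_info')
        if exc_info and isinstance(exc_info[1], ConnectionResetError):
            return None  # suppress: the peer going away is not actionable
        return event     # everything else gets reported

    sentry_sdk.init('https://examplePublicKey@o0.ingest.sentry.io/0', before_send=before_send)
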
diff --git a/runbot_merge/static/scss/runbot_merge.scss b/runbot_merge/static/scss/runbot_merge.scss
index 2b98cf32..ea2a12d4 100644
--- a/runbot_merge/static/scss/runbot_merge.scss
+++ b/runbot_merge/static/scss/runbot_merge.scss
@@ -14,27 +14,30 @@ h1, h2, h3, h4, h5, h6{
     margin-bottom: 0.33em;
 }
 h5 { font-size: 1em; }
-.bg-success, .bg-info, .bg-warning, .bg-danger, .bg-gray-lighter {
+.bg-success, .bg-info, .bg-warning, .bg-danger, .bg-gray-lighter,
+.table-success, .table-info, .table-warning, .table-danger {
     color: inherit;
 }
 .dropdown-item, .dropdown-menu, .dropdown-menu a {
     color: inherit;
 }
-.bg-success {
-    background-color: #dff0d8 !important;
+
+$mergebot-colors: ("success": #dff0d8, "danger": #f2dede, "warning": #fcf8e3, "info": #d9edf7);
+@each $category, $color in $mergebot-colors {
+    .bg-#{$category} {
+        background-color: $color !important;
+    }
+    .table-#{$category} {
+        background-color: $color !important;
+        &.table-active {
+            background-color: scale-color($color, $lightness: -5%) !important;
+        }
+    }
 }
 .bg-unmerged {
     background-color: #dcefe8 !important
 }
-.bg-info {
-    background-color: #d9edf7 !important;
-}
-.bg-warning {
-    background-color: #fcf8e3 !important;
-}
-.bg-danger {
-    background-color: #f2dede !important;
-}
+
 .list-inline {
     margin-bottom: 10px;
 }
@@ -79,6 +82,11 @@ h5 { font-size: 1em; }
     .batch a:not(:last-of-type) a:after {
         content: ",";
     }
+
+    button.dropdown-toggle {
+        text-align: left;
+        white-space: wrap;
+    }
 }
 .pr-listing > * { display: inline-block; }
 .pr-awaiting { opacity: 0.8; }
@@ -110,3 +118,27 @@ dl.runbot-merge-fields {
 .staging-statuses {
     cursor: wait;
 }
+
+/* forwardport */
+.outstanding-partners > * {
+    @extend .pt-1;
+    // because there's a trailing space which is annoying to remove, which plays
+    // the role of padding-right
+    @extend .pl-1;
+    @extend .text-nowrap;
+    // works better for the left edge of the *box*
+    @extend .border-left;
+}
+
+// batches sequence table in PR dashboard: mostly uses (customised) bootstrap
+// but some of the style is bespoke because inline styles don't work well with
+// CSP
+.closed {
+    text-decoration: line-through;
+}
+tr.inactive {
+    opacity: 0.5;
+}
+td.detached {
+    border-top: 2px solid map-get($theme-colors, "danger");
+}
diff --git a/runbot_merge/tests/conftest.py b/runbot_merge/tests/conftest.py
index 75cad7a8..17b1546e 100644
--- a/runbot_merge/tests/conftest.py
+++ b/runbot_merge/tests/conftest.py
@@ -1,36 +1,9 @@
 import pytest
-import requests
 
 @pytest.fixture()
 def module():
     return 'runbot_merge'
 
-@pytest.fixture
-def page(port):
-    s = requests.Session()
-    def get(url):
-        r = s.get('http://localhost:{}{}'.format(port, url))
-        r.raise_for_status()
-        return r.content
-    return get
-
-@pytest.fixture
-def default_crons():
-    return [
-        # env['runbot_merge.project']._check_fetch()
-        'runbot_merge.fetch_prs_cron',
-        # env['runbot_merge.commit']._notify()
-        'runbot_merge.process_updated_commits',
-        # env['runbot_merge.project']._check_stagings()
-        'runbot_merge.merge_cron',
-        # env['runbot_merge.project']._create_stagings()
-        'runbot_merge.staging_cron',
-        # env['runbot_merge.pull_requests']._check_linked_prs_statuses()
-        'runbot_merge.check_linked_prs_status',
-        # env['runbot_merge.pull_requests.feedback']._send()
-        'runbot_merge.feedback_cron',
-    ]
-
 @pytest.fixture
 def project(env, config):
     return env['runbot_merge.project'].create({
@@ -39,3 +12,30 @@ def project(env, config):
         'github_prefix': 'hansen',
         'branch_ids': [(0, 0, {'name': 'master'})],
     })
+
+
+@pytest.fixture
+def make_repo2(env, project, make_repo, users, setreviewers):
+    """Layer over ``make_repo`` which also:
+
+    - adds the new repo to ``project`` (with no group and the ``'default'`` status required)
+    - sets the standard reviewers on the repo
+    - and creates an event source for the repo
+    """
+    def mr(name):
+        r = make_repo(name)
+        rr = env['runbot_merge.repository'].create({
+            'project_id': project.id,
+            'name': r.name,
+            'group_id': False,
+            'required_statuses': 'default',
+        })
+        setreviewers(rr)
+        env['runbot_merge.events_sources'].create({'repository': r.name})
+        return r
+    return mr
+
+
+@pytest.fixture
+def repo(make_repo2):
+    return make_repo2('repo')
diff --git a/runbot_merge/tests/test_basic.py b/runbot_merge/tests/test_basic.py
index 84bdde29..f46b9d79 100644
--- a/runbot_merge/tests/test_basic.py
+++ b/runbot_merge/tests/test_basic.py
@@ -3,26 +3,55 @@ import itertools
 import json
 import textwrap
 import time
+from typing import Callable
 from unittest import mock
 
 import pytest
 import requests
-from lxml import html, etree
+from lxml import html
 
 import odoo
 
-from utils import _simple_init, seen, re_matches, get_partner, Commit, pr_page, to_pr, part_of
+from utils import _simple_init, seen, matches, get_partner, Commit, pr_page, to_pr, part_of, ensure_one
 
+@pytest.fixture(autouse=True)
+def _configure_statuses(request, project, repo):
+    if 'defaultstatuses' not in request.keywords:
+        project.repo_ids.required_statuses = 'legal/cla,ci/runbot'
 
-@pytest.fixture
-def repo(env, project, make_repo, users, setreviewers):
-    r = make_repo('repo')
-    project.write({'repo_ids': [(0, 0, {
-        'name': r.name,
-        'group_id': False,
-        'required_statuses': 'legal/cla,ci/runbot'
-    })]})
-    setreviewers(*project.repo_ids)
-    return r
+@pytest.fixture(autouse=True, params=["statuses", "rpc"])
+def stagings(request, env, project, repo):
+    """Hook in support for validating stagings via RPC calls instead of CI
+    webhooks.
Transparent for the tests as long as they send statuses to
+    symbolic refs (branch names) rather than commits, although commits *would*
+    probably be doable (look up the head for the commit, then what staging it's
+    part of)
+    """
+    if request.param == "statuses":
+        yield
+    else:
+        env['res.users'].browse([env._uid]).write({
+            "groups_id": [(4, env.ref("runbot_merge.status").id, {})]
+        })
+        project.write({
+            "staging_rpc": True,
+            "staging_statuses": False,
+        })
+        RepoType = type(repo)
+        # apparently side_effect + wraps on unbound method don't work correctly,
+        # the wrapped method does get called when returning DEFAULT but *the
+        # instance (subject) is not sent along for the ride* so the call fails.
+        post_status = RepoType.post_status
+        def _post_status(repo, ref, status, context='default', **kw):
+            if not ref.startswith(('staging.', 'heads/staging.')):
+                return post_status(repo, ref, status, context, **kw)
+
+            c = repo.commit(ref)
+            branchname = ref.removeprefix('staging.').removeprefix('heads/staging.')
+            env['runbot_merge.stagings'].search([('target.name', '=', branchname)])\
+                .post_status(c.id, context, status, **kw)
+
+        with mock.patch.object(RepoType, "post_status", _post_status):
+            yield
 
 def test_trivial_flow(env, repo, page, users, config):
     # create base branch
@@ -38,6 +67,13 @@ def test_trivial_flow(env, repo, page, users, config):
     )
     pr = repo.make_pr(title="gibberish", body="blahblah", target='master', head='other')
+    [c2] = repo.make_commits(
+        'other',
+        Commit('forgot a bit', tree={'whee': 'kjfdsh'}),
+        ref='heads/other',
+        make=False,
+    )
+
     pr_id = to_pr(env, pr)
     assert pr_id.state == 'opened'
     env.run_crons()
@@ -52,19 +88,12 @@ def test_trivial_flow(env, repo, page, users, config):
         [e.text_content() for e in pr_dashboard.cssselect('dl.runbot-merge-fields dd')],
     )) == {
         'label': f"{config['github']['owner']}:other",
-        'head': c1,
-        'target': 'master',
+        'head': c2,
     }
 
     with repo:
-        repo.post_status(c1, 'success', 'legal/cla')
-        # rewrite status payload in old-style to ensure it does not break
-        c = env['runbot_merge.commit'].search([('sha', '=', c1)])
-        c.statuses = json.dumps({k: v['state'] for k, v in json.loads(c.statuses).items()})
-
-    with repo:
-        repo.post_status(c1, 'success', 'ci/runbot')
-
+        repo.post_status(c2, 'success', 'legal/cla')
+        repo.post_status(c2, 'success', 'ci/runbot')
     env.run_crons()
 
     assert pr_id.state == 'validated'
@@ -88,12 +117,10 @@ def test_trivial_flow(env, repo, page, users, config):
     assert pr_page(page, pr).cssselect('.alert-primary')
 
     with repo:
-        # get head of staging branch
-        staging_head = repo.commit('heads/staging.master')
-        repo.post_status(staging_head.id, 'success', 'ci/runbot', target_url='http://foo.com/pog')
-        repo.post_status(staging_head.id, 'success', 'legal/cla')
+        repo.post_status('staging.master', 'success', 'ci/runbot', target_url='http://foo.com/pog')
+        repo.post_status('staging.master', 'success', 'legal/cla')
         # this should not block the merge because it's not part of the requirements
-        repo.post_status(staging_head.id, 'failure', 'ci/lint', target_url='http://ignored.com/whocares')
+        repo.post_status('staging.master', 'failure', 'ci/lint', target_url='http://ignored.com/whocares')
     # need to store this because after the crons have run the staging will
     # have succeeded and been disabled
     st = pr_id.staging_id
@@ -122,11 +149,54 @@ def test_trivial_flow(env, repo, page, users, config):
     assert repo.read_tree(master) == {
         'a': 'some other content',
        'b': 'a second file',
+        'whee': 'kjfdsh',
     }
 
     assert master.message ==
"gibberish\n\nblahblah\n\ncloses {repo.name}#1"\ "\n\nSigned-off-by: {reviewer.formatted_email}"\ .format(repo=repo, reviewer=get_partner(env, users['reviewer'])) + def get_tracking_values(record): + field_type = record.field_id.ttype + if not isinstance(field_type, str): + raise TypeError(f"{field_type!r} can't be a field type") + + if field_type in ('integer', 'float', 'char', 'text', 'monetary', 'datetime'): + return record[f'old_value_{field_type}'], record[f'new_value_{field_type}'] + elif field_type == 'date': + v1, v2 = record.old_value_datetime, record.new_value_datetime + return v1 and v1[:10], v2 and v2[:10] + elif field_type == 'boolean': + return bool(record.old_value_integer), bool(record.new_value_integer) + else: + return record.old_value_char, record.new_value_char + + # reverse because the messages are in newest-to-oldest by default + # (as that's how you want to read them) + messages = reversed([ + (m.author_id.display_name, m.body, [get_tracking_values(v) for v in m.tracking_value_ids]) + for m in pr_id.message_ids + ]) + + assert list(messages) == [ + (users['user'], '<p>Pull Request created</p>', []), + (users['user'], '', [(c1, c2)]), + ('OdooBot', f'<p>statuses changed on {c2}</p>', [('Opened', 'Validated')]), + # reviewer approved changing the state and setting reviewer as reviewer + # plus set merge method + ('Reviewer', '', [ + ('', 'rebase and merge, using the PR as merge commit message'), + ('', 'Reviewer'), + ('Validated', 'Ready'), + ]), + # staging succeeded + (matches('$$'), f'<p>staging {st.id} succeeded</p>', [ + # set merge date + (False, pr_id.merge_date), + # updated state + ('Ready', 'Merged'), + ]), + ] + class TestCommitMessage: def test_commit_simple(self, env, repo, users, config): """ verify 'closes ...' is correctly added in the commit message @@ -298,11 +368,15 @@ Co-authored-by: Bob <bob@example.com>""".format( ) class TestWebhookSecurity: + @pytest.fixture(autouse=True) + def add_secret_to_source(self, env, repo): + env['runbot_merge.events_sources'].search([ + ('repository', '=', repo.name), + ]).secret = "a secret" + def test_no_secret(self, env, project, repo): """ Test 1: didn't add a secret to the repo, should be ignored """ - project.secret = "a secret" - with repo: m = repo.make_commit(None, "initial", None, tree={'a': 'some content'}) repo.make_ref('heads/master', m) @@ -316,7 +390,6 @@ class TestWebhookSecurity: ]) def test_wrong_secret(self, env, project, repo): - project.secret = "a secret" with repo: repo.set_secret("wrong secret") @@ -332,7 +405,6 @@ class TestWebhookSecurity: ]) def test_correct_secret(self, env, project, repo): - project.secret = "a secret" with repo: repo.set_secret("a secret") @@ -382,18 +454,16 @@ def test_staging_ongoing(env, repo, config): ]) assert p_2.state == 'ready', "PR2 should not have been staged since there is a pending staging for master" - staging_head = repo.commit('heads/staging.master') with repo: - repo.post_status(staging_head.id, 'success', 'ci/runbot') - repo.post_status(staging_head.id, 'success', 'legal/cla') + repo.post_status('staging.master', 'success', 'ci/runbot') + repo.post_status('staging.master', 'success', 'legal/cla') env.run_crons() assert pr1.state == 'merged' assert p_2.staging_id - staging_head = repo.commit('heads/staging.master') with repo: - repo.post_status(staging_head.id, 'success', 'ci/runbot') - repo.post_status(staging_head.id, 'success', 'legal/cla') + repo.post_status('staging.master', 'success', 'ci/runbot') + repo.post_status('staging.master', 'success', 
'legal/cla')
     env.run_crons()
 
     assert p_2.state == 'merged'
@@ -435,6 +505,7 @@ def test_staging_concurrent(env, repo, config):
     ])
     assert pr2.staging_id
 
+
 def test_staging_conflict_first(env, repo, users, config, page):
     """ If the first batch of a staging triggers a conflict, the PR should
     be marked as in error
@@ -465,6 +536,7 @@ def test_staging_conflict_first(env, repo, users, config, page):
     assert dangerbox
     assert dangerbox[0].text.strip() == 'Unable to stage PR'
 
+
 def test_staging_conflict_second(env, repo, users, config):
     """ If the non-first batch of a staging triggers a conflict, the PR should
     just be skipped: it might be a conflict with another PR which could fail
@@ -504,19 +576,32 @@ def test_staging_conflict_second(env, repo, users, config):
     assert pr1_id.state == 'error', "now pr1 should be in error"
 
-def test_staging_ci_timeout(env, repo, config, page):
+@pytest.mark.defaultstatuses
+@pytest.mark.parametrize('update_op', [
+    pytest.param(
+        lambda _: {'timeout_limit': datetime.datetime.now().isoformat(" ", "seconds")},
+        id="set-timeout-limit",
+    ),
+    pytest.param(
+        lambda timeout: {'staged_at': (datetime.datetime.now() - datetime.timedelta(minutes=2*timeout)).isoformat(" ", "seconds")},
+        id="set-staged-at",
+    ),
+])
+def test_staging_ci_timeout(env, repo, config, page, update_op: Callable[[int], dict]):
     """If a staging times out (~ delay since staged greater than configured)...
     requeue?
     """
     with repo:
-        m = repo.make_commit(None, 'initial', None, tree={'f': 'm'})
+        m, _, c2 = repo.make_commits(
+            None,
+            Commit('initial', tree={'f': 'm'}),
+            Commit('first', tree={'f': 'c1'}),
+            Commit('second', tree={'f': 'c2'}),
+        )
         repo.make_ref('heads/master', m)
 
-        c1 = repo.make_commit(m, 'first', None, tree={'f': 'c1'})
-        c2 = repo.make_commit(c1, 'second', None, tree={'f': 'c2'})
         pr = repo.make_pr(title='title', body='body', target='master', head=c2)
-        repo.post_status(pr.head, 'success', 'ci/runbot')
-        repo.post_status(pr.head, 'success', 'legal/cla')
+        repo.post_status(pr.head, 'success')
         pr.post_comment('hansen r+ rebase-merge', config['role_reviewer']['token'])
     env.run_crons()
 
@@ -524,23 +609,26 @@ def test_staging_ci_timeout(env, repo, config, page):
     assert pr_id.staging_id
 
     timeout = env['runbot_merge.project'].search([]).ci_timeout
-    pr_id.staging_id.staged_at = odoo.fields.Datetime.to_string(datetime.datetime.now() - datetime.timedelta(minutes=2*timeout))
-    env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+    pr_id.staging_id.write(update_op(timeout))
+    env.run_crons(None)
     assert pr_id.state == 'error', "timeout should fail the PR"
 
     dangerbox = pr_page(page, pr).cssselect('.alert-danger span')
     assert dangerbox
     assert dangerbox[0].text == 'timed out (>60 minutes)'
 
+@pytest.mark.defaultstatuses
 def test_timeout_bump_on_pending(env, repo, config):
     with repo:
-        m = repo.make_commit(None, 'initial', None, tree={'f': '0'})
+        [m, c] = repo.make_commits(
+            None,
+            Commit('initial', tree={'f': '0'}),
+            Commit('c', tree={'f': '1'}),
+        )
         repo.make_ref('heads/master', m)
 
-        c = repo.make_commit(m, 'c', None, tree={'f': '1'})
         prx = repo.make_pr(title='title', body='body', target='master', head=c)
-        repo.post_status(prx.head, 'success', 'ci/runbot')
-        repo.post_status(prx.head, 'success', 'legal/cla')
+        repo.post_status(prx.head, 'success')
         prx.post_comment('hansen r+', config['role_reviewer']['token'])
     env.run_crons()
 
@@ -548,8 +636,8 @@
     old_timeout = odoo.fields.Datetime.to_string(datetime.datetime.now() -
datetime.timedelta(days=15)) st.timeout_limit = old_timeout with repo: - repo.post_status(repo.commit('heads/staging.master').id, 'pending', 'ci/runbot') - env.run_crons('runbot_merge.process_updated_commits') + repo.post_status('staging.master', 'pending') + env.run_crons(None) assert st.timeout_limit > old_timeout def test_staging_ci_failure_single(env, repo, users, config, page): @@ -569,11 +657,10 @@ def test_staging_ci_failure_single(env, repo, users, config, page): pr_id = to_pr(env, pr) assert pr_id.staging_id - staging_head = repo.commit('heads/staging.master') with repo: - repo.post_status(staging_head.id, 'failure', 'a/b') - repo.post_status(staging_head.id, 'success', 'legal/cla') - repo.post_status(staging_head.id, 'failure', 'ci/runbot') # stable genius + repo.post_status('staging.master', 'failure', 'a/b') + repo.post_status('staging.master', 'success', 'legal/cla') + repo.post_status('staging.master', 'failure', 'ci/runbot') # stable genius env.run_crons() assert pr_id.state == 'error' @@ -588,6 +675,7 @@ def test_staging_ci_failure_single(env, repo, users, config, page): assert dangerbox assert dangerbox[0].text == 'ci/runbot' + def test_ff_failure(env, repo, config, page): """ target updated while the PR is being staged => redo staging """ with repo: @@ -614,8 +702,8 @@ def test_ff_failure(env, repo, config, page): # report staging success & run cron to merge staging = repo.commit('heads/staging.master') with repo: - repo.post_status(staging.id, 'success', 'legal/cla') - repo.post_status(staging.id, 'success', 'ci/runbot') + repo.post_status('staging.master', 'success', 'legal/cla') + repo.post_status('staging.master', 'success', 'ci/runbot') env.run_crons() assert st.reason == 'update is not a fast forward' @@ -624,7 +712,7 @@ def test_ff_failure(env, repo, config, page): _new, prev = doc.cssselect('li.staging') assert 'bg-gray-lighter' in prev.classes, "ff failure is ~ cancelling" - assert prev.get('title') == re_matches(r'fast forward failed \(update is not a fast forward\)') + assert 'fast forward failed (update is not a fast forward)' in prev.get('title') assert env['runbot_merge.pull_requests'].search([ ('repository.name', '=', repo.name), @@ -633,6 +721,7 @@ def test_ff_failure(env, repo, config, page): assert repo.commit('heads/staging.master').id != staging.id,\ "PR should be staged to a new commit" + def test_ff_failure_batch(env, repo, users, config): with repo: m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) @@ -707,7 +796,7 @@ def test_ff_failure_batch(env, repo, users, config): } class TestPREdition: - def test_edit(self, env, repo, config): + def test_edit(self, env, project, repo, config): """ Editing PR: * title (-> message) @@ -750,7 +839,8 @@ class TestPREdition: with repo: prx.base = '1.0' assert pr.target == branch_1 assert not pr.staging_id, "updated the base of a staged PR should have unstaged it" - assert st.reason == f"{pr.display_name} target (base) branch was changed from 'master' to '1.0'" + assert st.state == 'cancelled', f"expected cancellation, got {st.state}" + assert st.reason == f"{pr.display_name} target (base) branch was changed from '{project.name}:master' to '{project.name}:1.0'" with repo: prx.base = '2.0' assert not pr.exists() @@ -762,9 +852,17 @@ class TestPREdition: ('number', '=', prx.number) ]).target == branch_1 - def test_retarget_update_commits(self, env, repo): - """ Retargeting a PR should update its commits count + def test_retarget_update_commits(self, env, project, repo): + """ Retargeting a PR should update 
its commits count, as well as follow
+        the new target's requirements
         """
+        project.repo_ids.write({
+            'status_ids': [
+                (5, 0, 0),
+                (0, 0, {'context': 'a', 'branch_filter': [('name', '=', 'master')]}),
+                (0, 0, {'context': 'b', 'branch_filter': [('name', '!=', 'master')]}),
+            ]
+        })
         branch_1 = env['runbot_merge.branch'].create({
             'name': '1.0',
             'project_id': env['runbot_merge.project'].search([]).id,
@@ -773,29 +871,35 @@ class TestPREdition:
 
         with repo:
             # master is 1 commit ahead of 1.0
-            m = repo.make_commit(None, 'initial', None, tree={'m': 'm'})
-            repo.make_ref('heads/1.0', m)
-            m2 = repo.make_commit(m, 'second', None, tree={'m': 'm2'})
-            repo.make_ref('heads/master', m2)
+            [m] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), ref='heads/1.0')
+            [m2] = repo.make_commits(m, Commit('second', tree={'m': 'm2'}), ref='heads/master')
 
             # the PR builds on master, but is erroneously targeted to 1.0
-            c = repo.make_commit(m2, 'first', None, tree={'m': 'm3'})
-            prx = repo.make_pr(title='title', body='body', target='1.0', head=c)
+            repo.make_commits(m2, Commit('first', tree={'m': 'm3'}), ref='heads/abranch')
+            prx = repo.make_pr(title='title', body='body', target='1.0', head='abranch')
+            repo.post_status('heads/abranch', 'success', 'a')
+        env.run_crons()
+
         pr = env['runbot_merge.pull_requests'].search([
             ('repository.name', '=', repo.name),
             ('number', '=', prx.number)
         ])
         assert not pr.squash
+        assert pr.status == 'pending'
+        assert pr.state == 'opened'
 
         with repo:
             prx.base = 'master'
         assert pr.target == master
         assert pr.squash
+        assert pr.status == 'success'
+        assert pr.state == 'validated'
 
         with repo:
             prx.base = '1.0'
         assert pr.target == branch_1
         assert not pr.squash
+        assert pr.status == 'pending'
+        assert pr.state == 'opened'
 
         # check if things also work right when modifying the PR then
         # retargeting (don't see why not but...)
@@ -851,6 +955,7 @@ def test_close_staged(env, repo, config, page):
         ('number', '=', prx.number),
     ])
     env.run_crons()
+    assert pr.reviewed_by
     assert pr.state == 'ready'
     assert pr.staging_id
 
@@ -862,6 +967,18 @@ def test_close_staged(env, repo, config, page):
     assert not env['runbot_merge.stagings'].search([])
     assert pr.state == 'closed'
     assert pr_page(page, prx).cssselect('.alert-light')
+    assert not pr.reviewed_by
+
+    with repo:
+        prx.open()
+    assert pr.state == 'validated'
+    assert not pr.reviewed_by
+
+    with repo:
+        prx.post_comment('hansen r+', config['role_reviewer']['token'])
+    assert pr.reviewed_by
+    pr.write({'closed': True})
+    assert not pr.reviewed_by
 
 def test_forward_port(env, repo, config):
     with repo:
@@ -883,8 +1000,8 @@ def test_forward_port(env, repo, config):
 
     st = repo.commit('staging.master')
     with repo:
-        repo.post_status(st.id, 'success', 'legal/cla')
-        repo.post_status(st.id, 'success', 'ci/runbot')
+        repo.post_status('staging.master', 'success', 'legal/cla')
+        repo.post_status('staging.master', 'success', 'ci/runbot')
     env.run_crons()
 
     h = repo.commit('master')
@@ -949,7 +1066,7 @@ def test_rebase_failure(env, repo, users, config):
     assert pr_a.comments == [
         (users['reviewer'], 'hansen r+'),
         seen(env, pr_a, users),
-        (users['user'], re_matches(r'^Unable to stage PR')),
+        (users['user'], matches('Unable to stage PR')),
     ]
     assert pr_b.comments == [
         (users['reviewer'], 'hansen r+'),
@@ -960,37 +1077,6 @@ def test_rebase_failure(env, repo, users, config):
         'b': 'b',
     }
 
-def test_ci_failure_after_review(env, repo, users, config):
-    """ If a PR is r+'d but the CI ends up failing afterwards, ping the user
-    so they're aware.
This is useful for the more "fire and forget" approach - especially small / simple PRs where you assume they're going to pass and - just r+ immediately. - """ - with repo: - prx = _simple_init(repo) - prx.post_comment('hansen r+', config['role_reviewer']['token']) - env.run_crons() - - for ctx, url in [ - ('ci/runbot', 'https://a'), - ('ci/runbot', 'https://a'), - ('legal/cla', 'https://b'), - ('foo/bar', 'https://c'), - ('ci/runbot', 'https://a'), - ('legal/cla', 'https://d'), # url changes so different from the previous - ]: - with repo: - repo.post_status(prx.head, 'failure', ctx, target_url=url) - env.run_crons() - - assert prx.comments == [ - (users['reviewer'], 'hansen r+'), - seen(env, prx, users), - (users['user'], "@{user} @{reviewer} 'ci/runbot' failed on this reviewed PR.".format_map(users)), - (users['user'], "@{user} @{reviewer} 'legal/cla' failed on this reviewed PR.".format_map(users)), - (users['user'], "@{user} @{reviewer} 'legal/cla' failed on this reviewed PR.".format_map(users)), - ] - def test_reopen_merged_pr(env, repo, config, users): """ Reopening a *merged* PR should cause us to immediately close it again, and insult whoever did it @@ -1036,63 +1122,63 @@ def test_reopen_merged_pr(env, repo, config, users): ] class TestNoRequiredStatus: + @pytest.mark.defaultstatuses def test_basic(self, env, repo, config): """ check that mergebot can work on a repo with no CI at all """ env['runbot_merge.repository'].search([('name', '=', repo.name)]).status_ids = False with repo: - m = repo.make_commit(None, 'initial', None, tree={'0': '0'}) + [m, c] = repo.make_commits( + None, + Commit('initial', tree={'0': '0'}), + Commit('first', tree={'0': '1'}), + ) repo.make_ref('heads/master', m) - c = repo.make_commit(m, 'first', None, tree={'0': '1'}) - prx = repo.make_pr(title='title', body='body', target='master', head=c) - prx.post_comment('hansen r+', config['role_reviewer']['token']) + pr = repo.make_pr(title='title', body='body', target='master', head=c) + pr.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number) - ]) - assert pr.state == 'ready' - st = pr.staging_id - assert st - env.run_crons() + pr_id = to_pr(env, pr) + + st = env['runbot_merge.stagings'].search([], context={'active_test': False}) assert st.state == 'success' - assert pr.state == 'merged' + assert pr_id.state == 'merged' + @pytest.mark.defaultstatuses def test_updated(self, env, repo, config): env['runbot_merge.repository'].search([('name', '=', repo.name)]).status_ids = False with repo: - m = repo.make_commit(None, 'initial', None, tree={'0': '0'}) + [m, c] = repo.make_commits( + None, + Commit('initial', tree={'0': '0'}), + Commit('first', tree={'0': '1'}), + ) repo.make_ref('heads/master', m) - c = repo.make_commit(m, 'first', None, tree={'0': '1'}) - prx = repo.make_pr(title='title', body='body', target='master', head=c) + pr = repo.make_pr(title='title', body='body', target='master', head=c) env.run_crons() - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number) - ]) - assert pr.state == 'validated' + pr_id = to_pr(env, pr) + assert pr_id.state == 'validated' # normal push with repo: - repo.make_commits(c, repo.Commit('second', tree={'0': '2'}), ref=prx.ref) + repo.make_commits(c, repo.Commit('second', tree={'0': '2'}), ref=pr.ref) env.run_crons() - assert pr.state == 'validated' + assert pr_id.state == 
'validated' with repo: - prx.post_comment('hansen r+', config['role_reviewer']['token']) - assert pr.state == 'ready' + pr.post_comment('hansen r+', config['role_reviewer']['token']) + assert pr_id.state == 'ready' # force push with repo: - repo.make_commits(m, repo.Commit('xxx', tree={'0': 'm'}), ref=prx.ref) + repo.make_commits(m, repo.Commit('xxx', tree={'0': 'm'}), ref=pr.ref) env.run_crons() - assert pr.state == 'validated' + assert pr_id.state == 'validated' with repo: - prx.post_comment('hansen r+', config['role_reviewer']['token']) - assert pr.state == 'ready' + pr.post_comment('hansen r+', config['role_reviewer']['token']) + assert pr_id.state == 'ready' class TestRetry: @pytest.mark.xfail(reason="This may not be a good idea as it could lead to tons of rebuild spam") @@ -1108,8 +1194,8 @@ class TestRetry: ]).staging_id staging_head = repo.commit('heads/staging.master') - repo.post_status(staging_head.id, 'success', 'legal/cla') - repo.post_status(staging_head.id, 'failure', 'ci/runbot') + repo.post_status('staging.master', 'success', 'legal/cla') + repo.post_status('staging.master', 'failure', 'ci/runbot') env.run_crons() pr = env['runbot_merge.pull_requests'].search([ ('repository.name', '=', repo.name), @@ -1128,8 +1214,8 @@ class TestRetry: staging_head2 = repo.commit('heads/staging.master') assert staging_head2 != staging_head - repo.post_status(staging_head2.id, 'success', 'legal/cla') - repo.post_status(staging_head2.id, 'success', 'ci/runbot') + repo.post_status('staging.master', 'success', 'legal/cla') + repo.post_status('staging.master', 'success', 'ci/runbot') env.run_crons() assert pr.state == 'merged' @@ -1139,45 +1225,50 @@ class TestRetry: reviewer asks for it """ with repo: - prx = _simple_init(repo) - repo.post_status(prx.head, 'success', 'ci/runbot') - repo.post_status(prx.head, 'success', 'legal/cla') - prx.post_comment('hansen r+ delegate=%s rebase-merge' % users['other'], - config["role_reviewer"]['token']) + pr = _simple_init(repo) + repo.post_status(pr.head, 'success', 'ci/runbot') + repo.post_status(pr.head, 'success', 'legal/cla') + pr.post_comment(f'hansen r+ delegate={users["other"]} rebase-merge', + config["role_reviewer"]['token']) env.run_crons() - assert env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number) - ]).staging_id + pr_id = to_pr(env, pr) + assert pr_id.staging_id staging_head = repo.commit('heads/staging.master') with repo: - repo.post_status(staging_head.id, 'success', 'legal/cla') - repo.post_status(staging_head.id, 'failure', 'ci/runbot') + repo.post_status('staging.master', 'success', 'legal/cla') + repo.post_status('staging.master', 'failure', 'ci/runbot') env.run_crons() - assert env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number) - ]).state == 'error' + assert pr_id.state == 'error' with repo: - prx.post_comment('hansen retry', config['role_' + retrier]['token']) - assert env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number) - ]).state == 'ready' - env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron') + pr.post_comment('hansen r+ rebase-ff', config["role_reviewer"]['token']) + env.run_crons() + assert pr_id.state == 'error' + assert pr.comments == [ + (users['reviewer'], f'hansen r+ delegate={users["other"]} rebase-merge'), + seen(env, pr, users), + (users['user'], 'Merge method set to rebase and merge, using the PR as merge commit message.'), + 
(users['user'], '@{user} @{reviewer} staging failed: ci/runbot'.format_map(users)), + (users['reviewer'], 'hansen r+ rebase-ff'), + (users['user'], "This PR is already reviewed, it's in error, you might want to `retry` it instead " + "(if you have already confirmed the error is not legitimate)."), + (users['user'], 'Merge method set to rebase and fast-forward.'), + ] + assert pr_id.merge_method == 'rebase-ff' + + with repo: + pr.post_comment('hansen retry', config['role_' + retrier]['token']) + assert pr_id.state == 'ready' + env.run_crons(None) staging_head2 = repo.commit('heads/staging.master') assert staging_head2 != staging_head with repo: - repo.post_status(staging_head2.id, 'success', 'legal/cla') - repo.post_status(staging_head2.id, 'success', 'ci/runbot') + repo.post_status('staging.master', 'success', 'legal/cla') + repo.post_status('staging.master', 'success', 'ci/runbot') env.run_crons() - assert env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number) - ]).state == 'merged' + assert pr_id.state == 'merged' def test_retry_again_message(self, env, repo, users, config, page): """ For a retried PR, the error message on the PR's page should be the @@ -1202,7 +1293,7 @@ class TestRetry: with repo: pr.post_comment('hansen retry', config['role_reviewer']['token']) - env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron') + env.run_crons(None) with repo: repo.post_status('staging.master', 'success', 'legal/cla') @@ -1220,53 +1311,48 @@ class TestRetry: """ with repo: prx = _simple_init(repo) - prx.post_comment('hansen r+', config['role_reviewer']['token']) + prx.post_comment('hansen r+ rebase-ff', config['role_reviewer']['token']) prx.post_comment('hansen retry', config['role_reviewer']['token']) env.run_crons() assert prx.comments == [ - (users['reviewer'], 'hansen r+'), + (users['reviewer'], 'hansen r+ rebase-ff'), (users['reviewer'], 'hansen retry'), seen(env, prx, users), - (users['user'], "I'm sorry, @{reviewer}: retry makes no sense when the PR is not in error.".format_map(users)), + (users['user'], "Merge method set to rebase and fast-forward."), + (users['user'], "@{reviewer} retry makes no sense when the PR is not in error.".format_map(users)), ] + @pytest.mark.defaultstatuses @pytest.mark.parametrize('disabler', ['user', 'other', 'reviewer']) def test_retry_disable(self, env, repo, disabler, users, config): with repo: prx = _simple_init(repo) - repo.post_status(prx.head, 'success', 'ci/runbot') - repo.post_status(prx.head, 'success', 'legal/cla') + repo.post_status(prx.head, 'success') prx.post_comment('hansen r+ delegate=%s rebase-merge' % users['other'], config["role_reviewer"]['token']) env.run_crons() - assert env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number) - ]).staging_id + pr_id = to_pr(env, prx) + staging_id = pr_id.staging_id + assert staging_id - staging_head = repo.commit('heads/staging.master') with repo: - repo.post_status(staging_head.id, 'success', 'legal/cla') - repo.post_status(staging_head.id, 'failure', 'ci/runbot') + repo.post_status('staging.master', 'failure') env.run_crons() - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number) - ]) - assert pr.state == 'error' + assert staging_id.state == 'failure' + assert not staging_id.active + assert pr_id.state == 'error' with repo: prx.post_comment('hansen r-', config['role_' + disabler]['token']) - assert pr.state == 
'validated'
+        assert pr_id.state == 'validated'
 
         with repo:
             repo.make_commit(prx.ref, 'third', None, tree={'m': 'c3'})
 
        # just in case, apparently in some cases the first post_status uses the old head...
        with repo:
-            repo.post_status(prx.head, 'success', 'ci/runbot')
-            repo.post_status(prx.head, 'success', 'legal/cla')
+            repo.post_status(prx.head, 'success')
         env.run_crons()
-        assert pr.state == 'validated'
+        assert pr_id.state == 'validated'
 
 class TestMergeMethod:
     """
@@ -1307,8 +1393,8 @@ class TestMergeMethod:
             "dummy commit aside, the previous master's tip should be the sole parent of the staging commit"
 
         with repo:
-            repo.post_status(staging.id, 'success', 'legal/cla')
-            repo.post_status(staging.id, 'success', 'ci/runbot')
+            repo.post_status('staging.master', 'success', 'legal/cla')
+            repo.post_status('staging.master', 'success', 'ci/runbot')
         env.run_crons()
         pr = env['runbot_merge.pull_requests'].search([
             ('repository.name', '=', repo.name),
@@ -1697,7 +1783,7 @@ commits, I need to know how to merge it:
             c0 = repo.make_commit(m, 'C0', None, tree={'a': 'b'})
             prx = repo.make_pr(title="gibberish", body="blahblah", target='master', head=c0)
-        env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+        env.run_crons(None)
 
         with repo:
             repo.post_status(prx.head, 'success', 'legal/cla')
@@ -1739,7 +1825,7 @@ commits, I need to know how to merge it:
             prx = repo.make_pr(title="gibberish", body=None, target='master', head=c0)
-        env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+        env.run_crons(None)
 
         with repo:
             repo.post_status(prx.head, 'success', 'legal/cla')
@@ -1776,9 +1862,9 @@ commits, I need to know how to merge it:
         with repo:
             root = repo.make_commits(None, Commit("root", tree={'a': 'a'}), ref='heads/master')
 
-            repo.make_commits(root, Commit('C', tree={'a': 'b'}), ref=f'heads/change')
+            repo.make_commits(root, Commit('C', tree={'a': 'b'}), ref='heads/change')
             pr = repo.make_pr(title="title", body=f'first\n{separator}\nsecond',
-                              target='master', head=f'change')
+                              target='master', head='change')
             repo.post_status(pr.head, 'success', 'legal/cla')
             repo.post_status(pr.head, 'success', 'ci/runbot')
             pr.post_comment('hansen r+ merge', config['role_reviewer']['token'])
@@ -1806,7 +1892,7 @@ commits, I need to know how to merge it:
         with repo:
             root = repo.make_commits(None, Commit("root", tree={'a': 'a'}), ref='heads/master')
 
-            repo.make_commits(root, Commit('C', tree={'a': 'b'}), ref=f'heads/change')
+            repo.make_commits(root, Commit('C', tree={'a': 'b'}), ref='heads/change')
             pr = repo.make_pr(title="title", body="""\
 Title
 ---
 This is more text
 ***
 removed
 """,
-                              target='master', head=f'change')
+                              target='master', head='change')
             repo.post_status(pr.head, 'success', 'legal/cla')
             repo.post_status(pr.head, 'success', 'ci/runbot')
             pr.post_comment('hansen r+ merge', config['role_reviewer']['token'])
@@ -1853,8 +1939,8 @@ removed
         with repo:
             root = repo.make_commits(None, Commit("root", tree={'a': 'a'}), ref='heads/master')
 
-            repo.make_commits(root, Commit('Commit\n\nfirst\n***\nsecond', tree={'a': 'b'}), ref=f'heads/change')
-            pr = repo.make_pr(title="PR", body=f'first\n***\nsecond',
+            repo.make_commits(root, Commit('Commit\n\nfirst\n***\nsecond', tree={'a': 'b'}), ref='heads/change')
+            pr = repo.make_pr(title="PR", body='first\n***\nsecond',
                               target='master', head='change')
             repo.post_status(pr.head, 'success', 'legal/cla')
             repo.post_status(pr.head, 'success', 'ci/runbot')
@@ -1915,7 +2001,8 @@ Some: thing is
odd -Part-of: {pr_id.display_name}""" +Part-of: {pr_id.display_name} +Signed-off-by: {reviewer}""" def test_pr_mergehead(self, repo, env, config): """ if the head of the PR is a merge commit and one of the parents is @@ -2007,7 +2094,7 @@ Part-of: {pr_id.display_name}""" assert log_to_node(repo.log('heads/master')), expected def test_squash_merge(self, repo, env, config, users): - other_user = requests.get(f'https://api.github.com/user', headers={ + other_user = requests.get('https://api.github.com/user', headers={ 'Authorization': 'token %s' % config['role_other']['token'], }).json() other_user = { @@ -2089,7 +2176,7 @@ Signed-off-by: {get_partner(env, users["reviewer"]).formatted_email}\ # FIXME: should probably get the token from the project to be sure it's # the bot user - current_user = repo._session.get(f'https://api.github.com/user').json() + current_user = repo._session.get('https://api.github.com/user').json() current_user = { 'name': current_user['name'] or current_user['login'], # FIXME: not guaranteed @@ -2127,10 +2214,8 @@ class TestPRUpdate(object): c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'}) prx = repo.make_pr(title='title', body='body', target='master', head=c) - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number), - ]) + + pr = to_pr(env, prx) assert pr.head == c # alter & push force PR entirely with repo: @@ -2138,25 +2223,28 @@ class TestPRUpdate(object): repo.update_ref(prx.ref, c2, force=True) assert pr.head == c2 - def test_reopen_update(self, env, repo): + def test_reopen_update(self, env, repo, config): with repo: m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) repo.make_ref('heads/master', m) c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'}) prx = repo.make_pr(title='title', body='body', target='master', head=c) - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number), - ]) + prx.post_comment("hansen r+", config['role_reviewer']['token']) + + pr = to_pr(env, prx) + assert pr.state == 'approved' + assert pr.reviewed_by with repo: prx.close() assert pr.state == 'closed' assert pr.head == c + assert not pr.reviewed_by with repo: prx.open() assert pr.state == 'opened' + assert not pr.reviewed_by with repo: c2 = repo.make_commit(c, 'first', None, tree={'m': 'cc'}) @@ -2175,10 +2263,7 @@ class TestPRUpdate(object): repo.post_status(prx.head, 'success', 'legal/cla') repo.post_status(prx.head, 'success', 'ci/runbot') env.run_crons() - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number), - ]) + pr = to_pr(env, prx) assert pr.head == c assert pr.state == 'validated' @@ -2196,10 +2281,8 @@ class TestPRUpdate(object): c = repo.make_commit(m, 'fist', None, tree={'m': 'c1'}) prx = repo.make_pr(title='title', body='body', target='master', head=c) prx.post_comment('hansen r+', config['role_reviewer']['token']) - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number), - ]) + + pr = to_pr(env, prx) assert pr.head == c assert pr.state == 'approved' @@ -2222,10 +2305,7 @@ class TestPRUpdate(object): repo.post_status(prx.head, 'success', 'ci/runbot') prx.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number), - ]) + pr = to_pr(env, prx) assert pr.head == c assert 
pr.state == 'ready' @@ -2247,11 +2327,9 @@ class TestPRUpdate(object): repo.post_status(prx.head, 'success', 'legal/cla') repo.post_status(prx.head, 'success', 'ci/runbot') prx.post_comment('hansen r+', config['role_reviewer']['token']) - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number), - ]) + env.run_crons() + pr = to_pr(env, prx) assert pr.state == 'ready' assert pr.staging_id @@ -2320,18 +2398,14 @@ class TestPRUpdate(object): repo.post_status(prx.head, 'success', 'legal/cla') repo.post_status(prx.head, 'success', 'ci/runbot') prx.post_comment('hansen r+', config['role_reviewer']['token']) - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number), - ]) env.run_crons() + pr = to_pr(env, prx) assert pr.state == 'ready' assert pr.staging_id - h = repo.commit('heads/staging.master').id with repo: - repo.post_status(h, 'success', 'legal/cla') - repo.post_status(h, 'failure', 'ci/runbot') + repo.post_status('staging.master', 'success', 'legal/cla') + repo.post_status('staging.master', 'failure', 'ci/runbot') env.run_crons() assert not pr.staging_id assert pr.state == 'error' @@ -2377,10 +2451,7 @@ class TestPRUpdate(object): with repo: prx = repo.make_pr(title='title', body='body', target='master', head=c) - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number), - ]) + pr = to_pr(env, prx) assert pr.head == c assert pr.state == 'opened' @@ -2413,13 +2484,13 @@ class TestPRUpdate(object): repo.post_status(pr.head, 'success', 'legal/cla') repo.post_status(pr.head, 'success', 'ci/runbot') pr.post_comment('hansen r+', config['role_reviewer']['token']) - pr_id = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', pr.number), - ]) - env.run_crons('runbot_merge.process_updated_commits') + + env.run_crons() + pr_id = to_pr(env, pr) + env.run_crons(None) assert pr_id.message == 'title\n\nbody' assert pr_id.state == 'ready' + old_reviewer = pr_id.reviewed_by # TODO: find way to somehow skip / ignore the update_ref? with repo: @@ -2440,10 +2511,12 @@ class TestPRUpdate(object): # in a "ready" state pr_id.write({ 'head': c, - 'state': 'ready', + 'reviewed_by': old_reviewer.id, 'message': "Something else", 'target': other.id, }) + assert pr_id.head == c + assert pr_id.state == "ready" env.run_crons() @@ -2452,8 +2525,8 @@ class TestPRUpdate(object): assert pr_id.head == c2 assert pr_id.message == 'title\n\nbody' assert pr_id.target.name == 'master' - assert pr.comments[-1]['body'] == """\ -@{} @{} we apparently missed updates to this PR and tried to stage it in a state \ + assert pr.comments[-1]['body'] == f"""\ +@{users['user']} we apparently missed updates to this PR and tried to stage it in a state \ which might not have been approved. The properties Head, Target, Message were not correctly synchronized and have been updated. @@ -2462,8 +2535,8 @@ The properties Head, Target, Message were not correctly synchronized and have be ```diff Head: -- {} -+ {} +- {c} ++ {c2} Target branch: - somethingelse @@ -2481,7 +2554,7 @@ The properties Head, Target, Message were not correctly synchronized and have be Note that we are unable to check the properties Merge Method, Overrides, Draft. Please check and re-approve. 
-""".format(users['user'], users['reviewer'], c, c2) +""" # if the head commit doesn't change, that part should still be valid with repo: @@ -2492,8 +2565,8 @@ Please check and re-approve. assert pr_id.message == 'title\n\nbody' assert pr_id.state == 'validated' - assert pr.comments[-1]['body'] == """\ -@{} @{} we apparently missed updates to this PR and tried to stage it in a state \ + assert pr.comments[-1]['body'] == f"""\ +@{users['user']} we apparently missed updates to this PR and tried to stage it in a state \ which might not have been approved. The properties Message were not correctly synchronized and have been updated. @@ -2513,13 +2586,14 @@ The properties Message were not correctly synchronized and have been updated. Note that we are unable to check the properties Merge Method, Overrides, Draft. Please check and re-approve. -""".format(users['user'], users['reviewer']) +""" pr_id.write({ 'head': c, - 'state': 'ready', + 'reviewed_by': old_reviewer.id, 'message': "Something else", 'target': other.id, + 'draft': True, }) with repo: pr.post_comment('hansen check') @@ -2528,7 +2602,11 @@ Please check and re-approve. assert pr_id.head == c2 assert pr_id.message == 'title\n\nbody' # the commit's message was used for the PR assert pr_id.target.name == 'master' - assert pr.comments[-1] == (users['user'], f"Updated target, squash, message. Updated to {c2}.") + assert not pr_id.draft + assert pr.comments[-1] == ( + users['user'], + f"Updated target, squash, message. Updated {pr_id.display_name} to ready. Updated to {c2}." + ) def test_update_closed(self, env, repo): with repo: @@ -2537,10 +2615,7 @@ Please check and re-approve. [c] = repo.make_commits(m, repo.Commit('first', tree={'m': 'm3'}), ref='heads/abranch') prx = repo.make_pr(title='title', body='body', target='master', head=c) env.run_crons() - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number) - ]) + pr = to_pr(env, prx) assert pr.state == 'opened' assert pr.head == c assert pr.squash @@ -2576,10 +2651,8 @@ Please check and re-approve. repo.post_status(c, 'success', 'ci/runbot') prx = repo.make_pr(title='title', body='body', target='master', head=c) - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number), - ]) + env.run_crons() + pr = to_pr(env, prx) assert pr.state == 'validated', \ "if a PR is created on a CI'd commit, it should be validated immediately" @@ -2590,28 +2663,6 @@ Please check and re-approve. 
assert pr.state == 'validated', \
            "if a PR is reopened and had a CI'd head, it should be validated immediately"
 
-    @pytest.mark.xfail(reason="github doesn't allow reopening force-pushed PRs", strict=True)
-    def test_force_update_closed(self, env, repo):
-        with repo:
-            [m] = repo.make_commits(None, repo.Commit('initial', tree={'m': 'm'}), ref='heads/master')
-
-            [c] = repo.make_commits(m, repo.Commit('first', tree={'m': 'm3'}), ref='heads/abranch')
-            prx = repo.make_pr(title='title', body='body', target='master', head=c)
-        env.run_crons()
-        pr = env['runbot_merge.pull_requests'].search([
-            ('repository.name', '=', repo.name),
-            ('number', '=', prx.number)
-        ])
-        with repo:
-            prx.close()
-
-        with repo:
-            c2 = repo.make_commit(m, 'xxx', None, tree={'m': 'm4'})
-            repo.update_ref(prx.ref, c2, force=True)
-
-        with repo:
-            prx.open()
-        assert pr.head == c2
 
 class TestBatching(object):
     def _pr(self, repo, prefix, trees, *, target='master', user, reviewer,
@@ -2744,6 +2795,9 @@ class TestBatching(object):
 
     def test_batching_pressing(self, env, repo, config):
         """ "Pressing" PRs should be selected before normal & batched together """
+        # by limiting the batch size to 3 we allow both high-priority PRs,
+        # plus only a single normal-priority one
+        env['runbot_merge.project'].search([]).batch_limit = 3
         with repo:
             m = repo.make_commit(None, 'initial', None, tree={'a': 'some content'})
             repo.make_ref('heads/master', m)
@@ -2753,51 +2807,57 @@ class TestBatching(object):
             pr11 = self._pr(repo, 'Pressing1', [{'x': 'x'}, {'y': 'y'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'])
             pr12 =
self._pr(repo, 'Pressing2', [{'z': 'z'}, {'zz': 'zz'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token']) - pr11.post_comment('hansen priority=1', config['role_reviewer']['token']) - pr12.post_comment('hansen priority=1', config['role_reviewer']['token']) + pr11.post_comment('hansen NOW', config['role_reviewer']['token']) + pr12.post_comment('hansen NOW', config['role_reviewer']['token']) - # stage PR1 + # stage current PRs env.run_crons() - p_11, p_12, p_21, p_22 = \ - [to_pr(env, pr) for pr in [pr11, pr12, pr21, pr22]] - assert not p_21.staging_id or p_22.staging_id - assert p_11.staging_id and p_12.staging_id - assert p_11.staging_id == p_12.staging_id - staging_1 = p_11.staging_id + p_11, p_12 = \ + [to_pr(env, pr) for pr in [pr11, pr12]] + sm_all = p_11 | p_12 + staging_1 = sm_all.staging_id + assert staging_1 + assert len(staging_1) == 1 + assert list(staging_1.batch_ids) == [ + p_11.batch_id, + p_12.batch_id, + ] # no statuses run on PR0s with repo: pr01 = self._pr(repo, 'Urgent1', [{'n': 'n'}, {'o': 'o'}], user=config['role_user']['token'], reviewer=None, statuses=[]) - pr01.post_comment('hansen priority=0 rebase-merge', config['role_reviewer']['token']) + pr01.post_comment('hansen NOW! rebase-merge', config['role_reviewer']['token']) p_01 = to_pr(env, pr01) - assert p_01.state == 'opened' - assert p_01.priority == 0 + assert p_01.state == 'ready' + assert p_01.priority == 'alone' + assert p_01.skipchecks == True env.run_crons() # first staging should be cancelled and PR0 should be staged @@ -2805,9 +2865,90 @@ class TestBatching(object): assert not staging_1.active assert not p_11.staging_id and not p_12.staging_id assert p_01.staging_id + assert p_11.state == 'ready' + assert p_12.state == 'ready' + # make the staging fail + with repo: + repo.post_status('staging.master', 'failure', 'ci/runbot') + env.run_crons() + assert p_01.error + assert p_01.batch_id.blocked + assert p_01.blocked + + assert p_01.state == 'error' + assert not p_01.staging_id.active + staging_2 = ensure_one(sm_all.staging_id) + assert staging_2 != staging_1 + + with repo: + pr01.post_comment('hansen retry', config['role_reviewer']['token']) + env.run_crons() + # retry should have re-triggered cancel-staging + assert not staging_2.active + assert p_01.staging_id.active + + # make the staging fail again + with repo: + repo.post_status('staging.master', 'failure', 'ci/runbot') + env.run_crons() + + assert not p_01.staging_id.active + assert p_01.state == 'error' + staging_3 = ensure_one(sm_all.staging_id) + assert staging_3 != staging_2 + + # check that updating the PR resets it to ~ready + with repo: + repo.make_commits( + 'heads/master', + Commit("urgent+", tree={'y': 'es'}), + ref="heads/Urgent1", + ) + env.run_crons() + assert not staging_3.active + assert p_01.state == 'ready' + assert p_01.priority == 'alone' + assert p_01.skipchecks == True + assert p_01.staging_id.active + + # r- should unstage, re-enable the checks and switch off staging + # cancellation, but leave the priority + with repo: + pr01.post_comment("hansen r-", config['role_reviewer']['token']) + env.run_crons() + + staging_4 = ensure_one(sm_all.staging_id) + assert staging_4 != staging_3 + + assert not p_01.staging_id.active + assert p_01.state == 'opened' + assert p_01.priority == 'alone' + assert p_01.skipchecks == False + assert p_01.cancel_staging == True + + assert staging_4.active, "staging should not be disabled" + + # cause the PR to become ready the normal way + with repo: + pr01.post_comment("hansen r+", 
config['role_reviewer']['token']) + repo.post_status(p_01.head, 'success', 'legal/cla') + repo.post_status(p_01.head, 'success', 'ci/runbot') + env.run_crons() + + # a cancel_staging pr becoming ready should have cancelled the staging, + # and because the PR is `alone` it should... have been restaged alone, + # without the ready non-alone PRs + assert not sm_all.staging_id.active + assert p_01.staging_id.active + assert p_01.state == 'ready' + assert p_01.priority == 'alone' + assert p_01.skipchecks == True + assert p_01.cancel_staging == True + + @pytest.mark.usefixtures("reviewer_admin") def test_batching_urgenter_than_split(self, env, repo, config): - """ p=0 PRs should take priority over split stagings (processing + """ p=alone PRs should take priority over split stagings (processing of a staging having CI-failed and being split into sub-stagings) """ with repo: @@ -2838,13 +2979,14 @@ class TestBatching(object): # during restaging of pr1, create urgent PR with repo: pr0 = self._pr(repo, 'urgent', [{'a': 'a', 'b': 'b'}], user=config['role_user']['token'], reviewer=None, statuses=[]) - pr0.post_comment('hansen priority=0', config['role_reviewer']['token']) + pr0.post_comment('hansen NOW!', config['role_reviewer']['token']) env.run_crons() # TODO: maybe just deactivate stagings instead of deleting them when canceling? assert not p_1.staging_id assert to_pr(env, pr0).staging_id + @pytest.mark.usefixtures("reviewer_admin") def test_urgent_failed(self, env, repo, config): """ Ensure pr[p=0,state=failed] don't get picked up """ @@ -2859,14 +3001,58 @@ class TestBatching(object): # no statuses run on PR0s with repo: pr01 = self._pr(repo, 'Urgent1', [{'n': 'n'}, {'o': 'o'}], user=config['role_user']['token'], reviewer=None, statuses=[]) - pr01.post_comment('hansen priority=0', config['role_reviewer']['token']) + pr01.post_comment('hansen NOW!', config['role_reviewer']['token']) p_01 = to_pr(env, pr01) - p_01.state = 'error' + p_01.error = True env.run_crons() assert not p_01.staging_id, "p_01 should not be picked up as it's failed" assert p_21.staging_id, "p_21 should have been staged" + def test_urgent_split(self, env, repo, config): + """Ensure that urgent (alone) PRs which get split don't get + double-merged + """ + with repo: + repo.make_commits( + None, + Commit("initial", tree={'a': '1'}), + ref="heads/master" + ) + + pr01 = self._pr( + repo, "PR1", [{'b': '1'}], + user=config['role_user']['token'], + reviewer=None, + ) + pr01.post_comment('hansen alone r+', config['role_reviewer']['token']) + pr02 = self._pr( + repo, "PR2", [{'c': '1'}], + user=config['role_user']['token'], + reviewer=None, + ) + pr02.post_comment('hansen alone r+', config['role_reviewer']['token']) + env.run_crons(None) + pr01_id = to_pr(env, pr01) + assert pr01_id.blocked is False + pr02_id = to_pr(env, pr02) + assert pr02_id.blocked is False + + env.run_crons() + st = pr01_id.staging_id + assert st and pr02_id.staging_id == st + with repo: + repo.post_status('staging.master', 'failure', 'ci/runbot') + env.run_crons() + # should have cancelled the staging, split it, and re-staged the first + # half of the split + assert st.state == 'failure' + assert pr01_id.staging_id and pr01_id.staging_id != st + assert not pr02_id.staging_id + split_prs = env['runbot_merge.split'].search([]).batch_ids.prs + assert split_prs == pr02_id, \ + f"only the unstaged PR {pr02_id} should be in a split, found {split_prs}" + @pytest.mark.skip(reason="Maybe nothing to do, the PR is just skipped and put in error?") def 
test_batching_merge_failure(self): pass @@ -2904,23 +3090,21 @@ class TestBatching(object): assert len(sp) == 1 # This is the failing PR! - h = repo.commit('heads/staging.master').id with repo: - repo.post_status(h, 'failure', 'ci/runbot') - repo.post_status(h, 'success', 'legal/cla') + repo.post_status('staging.master', 'failure', 'ci/runbot') + repo.post_status('staging.master', 'success', 'legal/cla') env.run_crons() assert pr1.state == 'error' assert pr2.staging_id - h = repo.commit('heads/staging.master').id with repo: - repo.post_status(h, 'success', 'ci/runbot') - repo.post_status(h, 'success', 'legal/cla') - env.run_crons('runbot_merge.process_updated_commits', 'runbot_merge.merge_cron', 'runbot_merge.staging_cron') + repo.post_status('staging.master', 'success', 'ci/runbot') + repo.post_status('staging.master', 'success', 'legal/cla') + env.run_crons(None) assert pr2.state == 'merged' -class TestReviewing(object): +class TestReviewing: def test_reviewer_rights(self, env, repo, users, config): """Only users with review rights will have their r+ (and other attributes) taken in account @@ -2958,23 +3142,22 @@ class TestReviewing(object): (users['user'], "I'm sorry, @{}. I'm afraid I can't do that.".format(users['other'])), (users['reviewer'], 'hansen r+'), (users['reviewer'], 'hansen r+'), - (users['user'], "I'm sorry, @{}: this PR is already reviewed, reviewing it again is useless.".format( + (users['user'], "This PR is already reviewed, reviewing it again is useless.".format( users['reviewer'])), ] def test_self_review_fail(self, env, repo, users, config): """ Normal reviewers can't self-review """ + reviewer = config['role_reviewer']['token'] with repo: - m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) - m2 = repo.make_commit(m, 'second', None, tree={'m': 'm', 'm2': 'm2'}) - repo.make_ref('heads/master', m2) - - c1 = repo.make_commit(m, 'first', None, tree={'m': 'c1'}) - prx = repo.make_pr(title='title', body='body', target='master', head=c1, token=config['role_reviewer']['token']) + [m, _] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), Commit('second', tree={'m2': 'm2'}), ref='heads/master') + with repo.fork(token=reviewer) as f: + f.make_commits(m, Commit('first', tree={'m': 'c1'}), ref='heads/change') + prx = repo.make_pr(title='title', body='body', target='master', head=f'{f.owner}:change', token=reviewer) repo.post_status(prx.head, 'success', 'legal/cla') repo.post_status(prx.head, 'success', 'ci/runbot') - prx.post_comment('hansen r+', config['role_reviewer']['token']) + prx.post_comment('hansen r+', reviewer) env.run_crons() assert prx.user == users['reviewer'] @@ -2987,22 +3170,21 @@ class TestReviewing(object): assert prx.comments == [ (users['reviewer'], 'hansen r+'), seen(env, prx, users), - (users['user'], "I'm sorry, @{}: you can't review+.".format(users['reviewer'])), + (users['user'], "@{} you can't review+.".format(users['reviewer'])), ] def test_self_review_success(self, env, repo, users, config): """ Some users are allowed to self-review """ + self_reviewer = config['role_self_reviewer']['token'] with repo: - m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) - m2 = repo.make_commit(m, 'second', None, tree={'m': 'm', 'm2': 'm2'}) - repo.make_ref('heads/master', m2) - - c1 = repo.make_commit(m, 'first', None, tree={'m': 'c1'}) - prx = repo.make_pr(title='title', body='body', target='master', head=c1, token=config['role_self_reviewer']['token']) + [m, _] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), 
Commit('second', tree={'m': 'm', 'm2': 'm2'}), ref='heads/master') + with repo.fork(token=self_reviewer) as f: + f.make_commits(m, Commit('first', tree={'m': 'c1'}), ref='heads/change') + prx = repo.make_pr(title='title', body='body', target='master', head=f'{f.owner}:change', token=self_reviewer) repo.post_status(prx.head, 'success', 'legal/cla') repo.post_status(prx.head, 'success', 'ci/runbot') - prx.post_comment('hansen r+', config['role_self_reviewer']['token']) + prx.post_comment('hansen r+', self_reviewer) env.run_crons() assert prx.user == users['self_reviewer'] @@ -3099,23 +3281,23 @@ class TestReviewing(object): ]) with repo: - prx.post_review('COMMENT', "hansen priority=1", config['role_reviewer']['token']) - assert pr.priority == 1 + prx.post_review('COMMENT', "hansen priority", config['role_reviewer']['token']) + assert pr.priority == 'priority' assert pr.state == 'opened' with repo: - prx.post_review('APPROVE', "hansen priority=2", config['role_reviewer']['token']) - assert pr.priority == 2 + prx.post_review('APPROVE', "hansen default", config['role_reviewer']['token']) + assert pr.priority == 'default' assert pr.state == 'opened' with repo: - prx.post_review('REQUEST_CHANGES', 'hansen priority=1', config['role_reviewer']['token']) - assert pr.priority == 1 + prx.post_review('REQUEST_CHANGES', 'hansen priority', config['role_reviewer']['token']) + assert pr.priority == 'priority' assert pr.state == 'opened' with repo: prx.post_review('COMMENT', 'hansen r+', config['role_reviewer']['token']) - assert pr.priority == 1 + assert pr.priority == 'priority' assert pr.state == 'approved' def test_no_email(self, env, repo, users, config, partners): @@ -3143,7 +3325,7 @@ class TestReviewing(object): seen(env, pr, users), (users['reviewer'], 'hansen delegate+'), (users['user'], 'hansen r+'), - (users['user'], f"I'm sorry, @{users['user']}: I must know your email before you can review PRs. Please contact an administrator."), + (users['user'], f"@{users['user']} I must know your email before you can review PRs. 
Please contact an administrator."), ] user_partner.fetch_github_email() assert user_partner.email @@ -3152,6 +3334,29 @@ class TestReviewing(object): env.run_crons() assert to_pr(env, pr).state == 'approved' + @pytest.mark.usefixtures("reviewer_admin") + def test_skipchecks(self, env, repo, users, config): + """Skipcheck makes the PR immediately ready (if it's not in error or + something) + """ + with repo: + [m, _] = repo.make_commits( + None, + Commit("initial", tree={'m': 'm'}), + Commit("second", tree={"m2": "m2"}), + ref="heads/master" + ) + + [c1] = repo.make_commits(m, Commit('first', tree={'m': 'c1'})) + pr = repo.make_pr(title='title', target='master', head=c1) + pr.post_comment('hansen skipchecks', config['role_reviewer']['token']) + env.run_crons() + + pr_id = to_pr(env, pr) + # assert pr_id.state == 'ready' + assert not pr_id.blocked + # since the pr is not blocked it should have been staged by the relevant cron + assert pr_id.staging_id class TestUnknownPR: """ Sync PRs initially looked excellent but aside from the v4 API not @@ -3207,10 +3412,10 @@ class TestUnknownPR: seen(env, prx, users), (users['reviewer'], 'hansen r+'), (users['reviewer'], 'hansen r+'), - (users['user'], "I didn't know about this PR and had to " + seen(env, prx, users), + (users['user'], f"@{users['user']} I didn't know about this PR and had to " "retrieve its information, you may have to " "re-approve it as I didn't see previous commands."), - seen(env, prx, users), ] pr = env['runbot_merge.pull_requests'].search([ @@ -3260,12 +3465,83 @@ class TestUnknownPR: assert pr.comments == [ seen(env, pr, users), (users['reviewer'], 'hansen r+'), - (users['user'], "I didn't know about this PR and had to retrieve " + seen(env, pr, users), + # reviewer is set because fetch replays all the comments (thus + # setting r+ and reviewer) but then syncs the head commit thus + # unsetting r+ but leaving the reviewer + (users['user'], f"@{users['user']} I didn't know about this PR and had to retrieve " "its information, you may have to re-approve it " "as I didn't see previous commands."), - seen(env, pr, users), ] + def test_close_unknown_unmanaged(self, env, repo, users, config): + """If an "unknown PR" is *closed*, it should be saved as closed but not + commented on, because that's unnecessary spam. + """ + with repo: + m, _ = repo.make_commits( + None, + Commit('initial', tree={'m': 'm'}), + Commit('second', tree={'m2': 'm2'}), + ref='heads/master') + + [c1] = repo.make_commits(m, Commit('first', tree={'m': 'c1'})) + pr = repo.make_pr(title='title', body='body', target='master', head=c1) + env.run_crons() + assert pr.comments == [seen(env, pr, users)] + + to_pr(env, pr).unlink() + env['runbot_merge.commit'].search([('sha', '=', pr.head)]).unlink() + + with repo: + pr.close() + + Fetch = env['runbot_merge.fetch_job'] + fetches = Fetch.search([('repository', '=', repo.name), ('number', '=', pr.number)]) + assert len(fetches) == 1, f"expected one fetch for {pr.number}, found {len(fetches)}" + + env.run_crons('runbot_merge.fetch_prs_cron') + env.run_crons() + assert not Fetch.search([('repository', '=', repo.name), ('number', '=', pr.number)]) + + assert to_pr(env, pr).state == 'closed' + assert pr.comments == [seen(env, pr, users)] + + + def test_close_unknown_disabled(self, env, repo, users, config): + """If an "unknown PR" on a disabled branch is *closed*, it should be + saved as closed but not commented on, because that's unnecessary spam. 
+ """ + with repo: + m, _ = repo.make_commits( + None, + Commit('initial', tree={'m': 'm'}), + Commit('second', tree={'m2': 'm2'}), + ref='heads/master') + + [c1] = repo.make_commits(m, Commit('first', tree={'m': 'c1'})) + pr = repo.make_pr(title='title', body='body', target='master', head=c1) + env.run_crons() + assert pr.comments == [seen(env, pr, users)] + + to_pr(env, pr).unlink() + env['runbot_merge.commit'].search([('sha', '=', pr.head)]).unlink() + env['runbot_merge.branch'].search([('name', '=', 'master')]).active = False + + with repo: + pr.close() + + Fetch = env['runbot_merge.fetch_job'] + fetches = Fetch.search([('repository', '=', repo.name), ('number', '=', pr.number)]) + assert len(fetches) == 1, f"expected one fetch for {pr.number}, found {len(fetches)}" + + env.run_crons('runbot_merge.fetch_prs_cron') + env.run_crons() + assert not Fetch.search([('repository', '=', repo.name), ('number', '=', pr.number)]) + + assert to_pr(env, pr).state == 'closed' + assert pr.comments == [seen(env, pr, users)] + def test_rplus_unmanaged(self, env, repo, users, config): """ r+ on an unmanaged target should notify about """ @@ -3282,7 +3558,6 @@ class TestUnknownPR: prx.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons( 'runbot_merge.fetch_prs_cron', - 'runbot_merge.feedback_cron', ) assert prx.comments == [ @@ -3307,7 +3582,6 @@ class TestUnknownPR: prx.post_review('APPROVE', 'hansen r+', config['role_reviewer']['token']) env.run_crons( 'runbot_merge.fetch_prs_cron', - 'runbot_merge.feedback_cron', ) # FIXME: either split out reviews in local or merge reviews & comments in remote @@ -3374,6 +3648,66 @@ class TestRecognizeCommands: (users['reviewer'], "hansen do the thing"), (users['reviewer'], "hansen @bobby-b r+ :+1:"), seen(env, pr, users), + (users['user'], """\ +@{reviewer} unknown command 'do'. + +For your own safety I've ignored *everything in your entire comment*. + +Currently available commands: + +|command|| +|-|-| +|`help`|displays this help| +|`r(eview)+`|approves the PR, if it's a forwardport also approves all non-detached parents| +|`r(eview)=<number>`|only approves the specified parents| +|`fw=no`|does not forward-port this PR| +|`fw=default`|forward-ports this PR normally| +|`fw=skipci`|does not wait for a forward-port's statuses to succeed before creating the next one| +|`up to <branch>`|only ports this PR forward to the specified branch (included)| +|`merge`|integrate the PR with a simple merge commit, using the PR description as message| +|`rebase-merge`|rebases the PR on top of the target branch the integrates with a merge commit, using the PR description as message| +|`rebase-ff`|rebases the PR on top of the target branch, then fast-forwards| +|`squash`|squashes the PR as a single commit on the target branch, using the PR description as message| +|`delegate+`|grants approval rights to the PR author| +|`delegate=<...>`|grants approval rights on this PR to the specified github users| +|`default`|stages the PR normally| +|`priority`|tries to stage this PR first, then adds `default` PRs if the staging has room| +|`alone`|stages this PR only with other PRs of the same priority| +|`cancel=staging`|automatically cancels the current staging when this PR becomes ready| +|`check`|fetches or refreshes PR metadata, resets mergebot state| + +Note: this help text is dynamic and will change with the state of the PR. +""".format_map(users)), + (users['user'], """\ +@{reviewer} unknown command '@bobby-b'. 
+ +For your own safety I've ignored *everything in your entire comment*. + +Currently available commands: + +|command|| +|-|-| +|`help`|displays this help| +|`r(eview)+`|approves the PR, if it's a forwardport also approves all non-detached parents| +|`r(eview)=<number>`|only approves the specified parents| +|`fw=no`|does not forward-port this PR| +|`fw=default`|forward-ports this PR normally| +|`fw=skipci`|does not wait for a forward-port's statuses to succeed before creating the next one| +|`up to <branch>`|only ports this PR forward to the specified branch (included)| +|`merge`|integrate the PR with a simple merge commit, using the PR description as message| +|`rebase-merge`|rebases the PR on top of the target branch the integrates with a merge commit, using the PR description as message| +|`rebase-ff`|rebases the PR on top of the target branch, then fast-forwards| +|`squash`|squashes the PR as a single commit on the target branch, using the PR description as message| +|`delegate+`|grants approval rights to the PR author| +|`delegate=<...>`|grants approval rights on this PR to the specified github users| +|`default`|stages the PR normally| +|`priority`|tries to stage this PR first, then adds `default` PRs if the staging has room| +|`alone`|stages this PR only with other PRs of the same priority| +|`cancel=staging`|automatically cancels the current staging when this PR becomes ready| +|`check`|fetches or refreshes PR metadata, resets mergebot state| + +Note: this help text is dynamic and will change with the state of the PR. +""".format_map(users)), ] class TestRMinus: @@ -3554,41 +3888,6 @@ class TestRMinus: assert pr2.state == 'validated', "state should have been reset" assert not env['runbot_merge.split'].search([]), "there should be no split left" - def test_rminus_p0(self, env, repo, config, users): - """ In and of itself r- doesn't do anything on p=0 since they bypass - approval, so unstage and downgrade to p=1. 
- """ - - with repo: - m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) - repo.make_ref('heads/master', m) - - c = repo.make_commit(m, 'first', None, tree={'m': 'c'}) - prx = repo.make_pr(title='title', body=None, target='master', head=c) - repo.post_status(prx.head, 'success', 'ci/runbot') - repo.post_status(prx.head, 'success', 'legal/cla') - prx.post_comment('hansen p=0', config['role_reviewer']['token']) - env.run_crons() - - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number), - ]) - assert pr.priority == 0 - assert pr.staging_id - - with repo: - prx.post_comment('hansen r-', config['role_reviewer']['token']) - env.run_crons() - assert not pr.staging_id, "pr should have been unstaged" - assert pr.priority == 1, "priority should have been downgraded" - assert prx.comments == [ - (users['reviewer'], 'hansen p=0'), - seen(env, prx, users), - (users['reviewer'], 'hansen r-'), - (users['user'], "PR priority reset to 1, as pull requests with priority 0 ignore review state."), - ] - class TestComments: def test_address_method(self, repo, env, config): with repo: @@ -3664,28 +3963,34 @@ class TestFeedback: def test_ci_approved(self, repo, env, users, config): """CI failing on an r+'d PR sends feedback""" with repo: - m = repo.make_commit(None, 'initial', None, tree={'m': 'm'}) - repo.make_ref('heads/master', m) + [m] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), ref="heads/master") - c1 = repo.make_commit(m, 'first', None, tree={'m': 'c1'}) - prx = repo.make_pr(title='title', body='body', target='master', head=c1) - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number) - ]) - - with repo: - prx.post_comment('hansen r+', config['role_reviewer']['token']) - assert pr.state == 'approved' - - with repo: - repo.post_status(prx.head, 'failure', 'ci/runbot') + [c1] = repo.make_commits(m, Commit('first', tree={'m': 'c1'})) + pr = repo.make_pr(title='title', body='body', target='master', head=c1) + pr.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() - assert prx.comments == [ + pr_id = to_pr(env, pr) + assert pr_id.state == 'approved' + + for ctx, url in [ + ('ci/runbot', 'https://a'), + ('ci/runbot', 'https://a'), + ('legal/cla', 'https://b'), + ('foo/bar', 'https://c'), + ('ci/runbot', 'https://a'), + ('legal/cla', 'https://d'), # url changes so different from the previous + ]: + with repo: + repo.post_status(pr_id.head, 'failure', ctx, target_url=url) + env.run_crons() + + assert pr.comments == [ (users['reviewer'], 'hansen r+'), - seen(env, prx, users), - (users['user'], "@%(user)s @%(reviewer)s 'ci/runbot' failed on this reviewed PR." 
% users) + seen(env, pr, users), + (users['user'], "@{user} @{reviewer} 'ci/runbot' failed on this reviewed PR.".format_map(users)), + (users['user'], "@{user} @{reviewer} 'legal/cla' failed on this reviewed PR.".format_map(users)), + (users['user'], "@{user} @{reviewer} 'legal/cla' failed on this reviewed PR.".format_map(users)), ] def test_review_failed(self, repo, env, users, config): @@ -3735,7 +4040,7 @@ class TestInfrastructure: assert repo.get_ref('heads/master') == m1 def node(name, *children): - assert type(name) in (str, re_matches) + assert type(name) in (str, matches) return name, frozenset(children) def log_to_node(log): log = list(log) diff --git a/runbot_merge/tests/test_batch_consistency.py b/runbot_merge/tests/test_batch_consistency.py new file mode 100644 index 00000000..0cc3b8b0 --- /dev/null +++ b/runbot_merge/tests/test_batch_consistency.py @@ -0,0 +1,201 @@ +"""This module tests edge cases specific to the batch objects themselves, +without wider relevance and thus other location. +""" +import pytest + +from utils import Commit, to_pr, pr_page + + +def test_close_single(env, repo): + """If a batch has a single PR and that PR gets closed, the batch should be + inactive *and* blocked. + """ + with repo: + repo.make_commits(None, Commit("a", tree={"a": "a"}), ref='heads/master') + [c] = repo.make_commits('master', Commit('b', tree={"b": "b"})) + pr = repo.make_pr(head=c, target='master') + env.run_crons() + + pr_id = to_pr(env, pr) + batch_id = pr_id.batch_id + assert pr_id.state == 'opened' + assert batch_id.blocked + Batches = env['runbot_merge.batch'] + assert Batches.search_count([]) == 1 + + with repo: + pr.close() + + assert pr_id.state == 'closed' + assert batch_id.all_prs == pr_id + assert batch_id.prs == pr_id.browse(()) + assert batch_id.blocked == "all prs are closed" + assert not batch_id.active + + assert Batches.search_count([]) == 0 + +def test_close_multiple(env, make_repo2): + Batches = env['runbot_merge.batch'] + repo1 = make_repo2('wheee') + repo2 = make_repo2('wheeee') + + with repo1: + repo1.make_commits(None, Commit("a", tree={"a": "a"}), ref='heads/master') + repo1.make_commits('master', Commit('b', tree={"b": "b"}), ref='heads/a_pr') + pr1 = repo1.make_pr(head='a_pr', target='master') + + with repo2: + repo2.make_commits(None, Commit("a", tree={"a": "a"}), ref='heads/master') + repo2.make_commits('master', Commit('b', tree={"b": "b"}), ref='heads/a_pr') + pr2 = repo2.make_pr(head='a_pr', target='master') + + pr1_id = to_pr(env, pr1) + pr2_id = to_pr(env, pr2) + batch_id = pr1_id.batch_id + assert pr2_id.batch_id == batch_id + + assert pr1_id.state == 'opened' + assert pr2_id.state == 'opened' + assert batch_id.all_prs == pr1_id | pr2_id + assert batch_id.prs == pr1_id | pr2_id + assert batch_id.active + assert Batches.search_count([]) == 1 + + with repo1: + pr1.close() + + assert pr1_id.state == 'closed' + assert pr2_id.state == 'opened' + assert batch_id.all_prs == pr1_id | pr2_id + assert batch_id.prs == pr2_id + assert batch_id.active + assert Batches.search_count([]) == 1 + + with repo2: + pr2.close() + + assert pr1_id.state == 'closed' + assert pr2_id.state == 'closed' + assert batch_id.all_prs == pr1_id | pr2_id + assert batch_id.prs == env['runbot_merge.pull_requests'].browse(()) + assert not batch_id.active + assert Batches.search_count([]) == 0 + +def test_inconsistent_target(env, project, make_repo2, users, page, config): + """If a batch's PRs have inconsistent targets, + + - only open PRs should count + - it should be clearly notified on 
the dash + - the dash should not get hopelessly lost + - there should be a wizard to split the batch / move a PR to a separate batch + """ + # region setup + Batches = env['runbot_merge.batch'] + repo1 = make_repo2('whe') + repo2 = make_repo2('whee') + repo3 = make_repo2('wheee') + project.write({'branch_ids': [(0, 0, {'name': 'other'})]}) + + with repo1: + [m] = repo1.make_commits(None, Commit("a", tree={"a": "a"}), ref='heads/master') + repo1.make_ref('heads/other', m) + repo1.make_commits('master', Commit('b', tree={"b": "b"}), ref='heads/a_pr') + pr1 = repo1.make_pr(head='a_pr', target='master') + + repo1.make_commits('master', Commit('b', tree={"c": "c"}), ref='heads/something_else') + pr_other = repo1.make_pr(head='something_else', target='master') + + with repo2: + [m] = repo2.make_commits(None, Commit("a", tree={"a": "a"}), ref='heads/master') + repo2.make_ref("heads/other", m) + repo2.make_commits('master', Commit('b', tree={"b": "b"}), ref='heads/a_pr') + pr2 = repo2.make_pr(head='a_pr', target='master') + + with repo3: + [m] = repo3.make_commits(None, Commit("a", tree={"a": "a"}), ref='heads/master') + repo3.make_ref("heads/other", m) + repo3.make_commits('master', Commit('b', tree={"b": "b"}), ref='heads/a_pr') + pr3 = repo3.make_pr(head='a_pr', target='master') + + assert repo1.owner == repo2.owner == repo3.owner + owner = repo1.owner + # endregion + + # region closeable consistency + + [b] = Batches.search([('all_prs.label', '=', f'{owner}:a_pr')]) + assert b.target.name == 'master' + assert len(b.prs) == 3 + assert len(b.all_prs) == 3 + + with repo3: + pr3.base = 'other' + assert b.target.name == False + assert len(b.prs) == 3 + assert len(b.all_prs) == 3 + + with repo3: + pr3.close() + assert b.target.name == 'master' + assert len(b.prs) == 2 + assert len(b.all_prs) == 3 + # endregion + + # region split batch + pr1_id = to_pr(env, pr1) + pr2_id = to_pr(env, pr2) + with repo2: + pr2.base = 'other' + + pr2_dashboard = pr_page(page, pr2) + # The dashboard should have an alert + s = pr2_dashboard.cssselect('.alert.alert-danger') + assert s, "the dashboard should have an alert" + assert s[0].text_content().strip() == f"""\ +Inconsistent targets: + +{pr1_id.display_name} has target 'master' +{pr2_id.display_name} has target 'other'\ +""" + assert not pr2_dashboard.cssselect('table'), "the batches table should be suppressed" + + assert b.target.name == False + assert to_pr(env, pr_other).label == f'{owner}:something_else' + # try staging + with repo1: + pr1.post_comment("hansen r+", config['role_reviewer']['token']) + repo1.post_status(pr1.head, "success") + with repo2: + pr2.post_comment("hansen r+", config['role_reviewer']['token']) + repo2.post_status(pr2.head, "success") + env.run_crons() + assert not pr1_id.blocked + assert not pr2_id.blocked + assert b.blocked == "Multiple target branches: 'other, master'" + assert env['runbot_merge.stagings'].search_count([]) == 0 + + act = pr2_id.button_split() + assert act['type'] == 'ir.actions.act_window' + assert act['views'] == [[False, 'form']] + assert act['target'] == 'new' + w = env[act['res_model']].browse([act['res_id']]) + w.new_label = f"{owner}:something_else" + with pytest.raises(Exception): + w.button_apply() + w.new_label = f"{owner}:blah-blah-blah" + w.button_apply() + + assert pr2_id.label == f"{owner}:blah-blah-blah" + assert pr2_id.batch_id != to_pr(env, pr1).batch_id + assert b.target.name == 'master' + assert len(b.prs) == 1, "the PR has been moved off of this batch entirely" + assert len(b.all_prs) == 2 + # 
endregion + + assert not pr1_id.blocked + assert not pr1_id.batch_id.blocked + assert not pr2_id.blocked + assert not pr2_id.batch_id.blocked + env.run_crons() + + assert env['runbot_merge.stagings'].search_count([]) diff --git a/runbot_merge/tests/test_by_branch.py b/runbot_merge/tests/test_by_branch.py index 4122bf82..9134a4e2 100644 --- a/runbot_merge/tests/test_by_branch.py +++ b/runbot_merge/tests/test_by_branch.py @@ -2,28 +2,21 @@ import pytest from utils import Commit - @pytest.fixture -def repo(env, project, make_repo, users, setreviewers): - r = make_repo('repo') - project.write({ - 'repo_ids': [(0, 0, { - 'name': r.name, - 'status_ids': [ - (0, 0, {'context': 'ci'}), - # require the lint status on master - (0, 0, { - 'context': 'lint', - 'branch_filter': [('id', '=', project.branch_ids.id)] - }), - (0, 0, {'context': 'pr', 'stagings': False}), - (0, 0, {'context': 'staging', 'prs': False}), - ] - })], - }) - setreviewers(*project.repo_ids) - return r +def _setup_statuses(project, repo): + project.repo_ids.status_ids = [ + (5, 0, 0), + (0, 0, {'context': 'ci'}), + # require the lint status on master + (0, 0, { + 'context': 'lint', + 'branch_filter': [('id', '=', project.branch_ids.id)] + }), + (0, 0, {'context': 'pr', 'stagings': False}), + (0, 0, {'context': 'staging', 'prs': False}), + ] +@pytest.mark.usefixtures('_setup_statuses') def test_status_applies(env, repo, config): """ If branches are associated with a repo status, only those branch should require the status on their PRs & stagings @@ -41,15 +34,15 @@ def test_status_applies(env, repo, config): with repo: repo.post_status(c, 'success', 'ci') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert pr_id.state == 'opened' with repo: repo.post_status(c, 'success', 'pr') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert pr_id.state == 'opened' with repo: repo.post_status(c, 'success', 'lint') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert pr_id.state == 'validated' with repo: @@ -60,17 +53,18 @@ def test_status_applies(env, repo, config): assert st.state == 'pending' with repo: repo.post_status('staging.master', 'success', 'ci') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert st.state == 'pending' with repo: repo.post_status('staging.master', 'success', 'lint') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert st.state == 'pending' with repo: repo.post_status('staging.master', 'success', 'staging') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert st.state == 'success' +@pytest.mark.usefixtures('_setup_statuses') def test_status_skipped(env, project, repo, config): """ Branches not associated with a repo status should not require the status on their PRs or stagings @@ -90,11 +84,11 @@ def test_status_skipped(env, project, repo, config): with repo: repo.post_status(c, 'success', 'ci') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert pr_id.state == 'opened' with repo: repo.post_status(c, 'success', 'pr') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert pr_id.state == 'validated' with repo: @@ -105,11 +99,11 @@ def test_status_skipped(env, project, repo, config): assert st.state == 'pending' with repo: repo.post_status('staging.maintenance', 'success', 'staging') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert 
st.state == 'pending' with repo: repo.post_status('staging.maintenance', 'success', 'ci') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert st.state == 'success' def test_pseudo_version_tag(env, project, make_repo, setreviewers, config): @@ -132,6 +126,7 @@ def test_pseudo_version_tag(env, project, make_repo, setreviewers, config): ], }) setreviewers(*project.repo_ids) + env['runbot_merge.events_sources'].create({'repository': repo.name}) with repo: [m] = repo.make_commits(None, Commit('c1', tree={'a': '1'}), ref='heads/master') diff --git a/runbot_merge/tests/test_dfm.py b/runbot_merge/tests/test_dfm.py new file mode 100644 index 00000000..1fac6203 --- /dev/null +++ b/runbot_merge/tests/test_dfm.py @@ -0,0 +1,72 @@ +from odoo.addons.runbot_merge.models.utils import dfm + +def test_odoo_links(): + assert dfm("", "OPW-42") == '<p><a href="https://www.odoo.com/web#model=project.task&id=42">opw-42</a></p>' + assert dfm("", "taskid : 42") == '<p><a href="https://www.odoo.com/web#model=project.task&id=42">task-42</a></p>' + assert dfm("", "I was doing task foo") == '<p>I was doing task foo</p>' + assert dfm("", "Task 687d3") == "<p>Task 687d3</p>" + +def p(*content): + return f'<p>{"".join(content)}</p>' +def a(label, url): + return f'<a href="{url}">{label}</a>' +def test_gh_issue_links(): + # same-repository link + assert dfm("odoo/runbot", "thing thing #26") == p("thing thing ", a('#26', 'https://github.com/odoo/runbot/issues/26')) + assert dfm("odoo/runbot", "GH-26") == p(a('GH-26', 'https://github.com/odoo/runbot/issues/26')) + assert dfm( + "odoo/runbot", "https://github.com/odoo/runbot/issues/26" + ) == p(a('#26', 'https://github.com/odoo/runbot/issues/26')) + + # cross-repo link + assert dfm( + "odoo/runbot", "jlord/sheetsee.js#26" + ) == p(a('jlord/sheetsee.js#26', 'https://github.com/jlord/sheetsee.js/issues/26')) + assert dfm( + "odoo/runbot", "https://github.com/jlord/sheetsee.js/pull/26" + ) == p(a('jlord/sheetsee.js#26', 'https://github.com/jlord/sheetsee.js/issues/26')) + + # cross-repo link with comment + assert dfm( + "odoo/runbot", "https://github.com/odoo/odoo/pull/173061#issuecomment-2227874482" + ) == p(a("odoo/odoo#173061 (comment)", "https://github.com/odoo/odoo/issues/173061#issuecomment-2227874482")) + + +def test_gh_commit_link(): + # same repository + assert dfm( + "odoo/runbot", "https://github.com/odoo/runbot/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e" + ) == p(a("a5c3785ed8d6a35868bc169f07e40e889087fd2e", "https://github.com/odoo/runbot/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e")) + # cross fork + assert dfm( + "odoo/runbot", "jlord@a5c3785ed8d6a35868bc169f07e40e889087fd2e" + ) == p(a("jlord@a5c3785ed8d6a35868bc169f07e40e889087fd2e", "https://github.com/jlord/runbot/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e")) + assert dfm( + "odoo/runbot", "https://github.com/jlord/runbot/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e" + ) == p(a("jlord@a5c3785ed8d6a35868bc169f07e40e889087fd2e", "https://github.com/jlord/runbot/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e")) + # cross repo + assert dfm( + "odoo/runbot", "jlord/sheetsee.js@a5c3785ed8d6a35868bc169f07e40e889087fd2e" + ) == p(a("jlord/sheetsee.js@a5c3785ed8d6a35868bc169f07e40e889087fd2e", "https://github.com/jlord/sheetsee.js/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e")) + assert dfm( + "odoo/runbot", "https://github.com/jlord/sheetsee.js/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e" + ) == p(a("jlord/sheetsee.js@a5c3785ed8d6a35868bc169f07e40e889087fd2e", 
"https://github.com/jlord/sheetsee.js/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e")) + +def test_standalone_hash(): + assert dfm( + "odoo/runbot", "a5c3785ed8d6a35868bc169f07e40e889087fd2e" + ) == p(a("a5c3785ed8d6a35868bc169f07e40e889087fd2e", "https://github.com/odoo/runbot/commit/a5c3785ed8d6a35868bc169f07e40e889087fd2e")) + assert dfm( + "odoo/runbot", "a5c3785ed8d6a35868bc169f07e4" + ) == p(a("a5c3785ed8d6a35868bc169f07e4", "https://github.com/odoo/runbot/commit/a5c3785ed8d6a35868bc169f07e4")) + assert dfm( + "odoo/runbot", "a5c3785" + ) == p(a("a5c3785", "https://github.com/odoo/runbot/commit/a5c3785")) + assert dfm( + "odoo/runbot", "a5c378" + ) == p("a5c378") + +def test_ignore_tel(): + assert dfm("", "[ok](https://github.com)") == p(a("ok", "https://github.com")) + assert dfm("", "[nope](tel:+1-212-555-0100)") == "<p>nope</p>" + assert dfm("", "[lol](rdar://10198949)") == "<p>lol</p>" diff --git a/runbot_merge/tests/test_disabled_branch.py b/runbot_merge/tests/test_disabled_branch.py index f56ea125..88e5e490 100644 --- a/runbot_merge/tests/test_disabled_branch.py +++ b/runbot_merge/tests/test_disabled_branch.py @@ -1,9 +1,15 @@ +import pytest + from utils import seen, Commit, pr_page def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, config, users, page): """ PRs to disabled branches are ignored, but what if the PR exists *before* the branch is disabled? """ + # run crons from template to clean up the queue before possibly creating + # new work + assert env['base'].run_crons() + repo = make_repo('repo') project.branch_ids.sequence = 0 project.write({'branch_ids': [ @@ -17,6 +23,7 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf 'group_id': False, }) setreviewers(*project.repo_ids) + env['runbot_merge.events_sources'].create({'repository': repo.name}) with repo: [m] = repo.make_commits(None, Commit('root', tree={'a': '1'}), ref='heads/master') @@ -38,10 +45,21 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf staging_id = branch_id.active_staging_id assert staging_id == pr_id.staging_id + # staging of `pr` should have generated a staging branch + _ = repo.get_ref('heads/staging.other') + # stagings should not need a tmp branch anymore, so this should not exist + with pytest.raises(AssertionError, match=r'Not Found'): + repo.get_ref('heads/tmp.other') + # disable branch "other" branch_id.active = False env.run_crons() + # triggered cleanup should have deleted the staging for the disabled `other` + # target branch + with pytest.raises(AssertionError, match=r'Not Found'): + repo.get_ref('heads/staging.other') + # the PR should not have been closed implicitly assert pr_id.state == 'ready' # but it should be unstaged @@ -50,20 +68,17 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf assert not branch_id.active_staging_id assert staging_id.state == 'cancelled', \ "closing the PRs should have canceled the staging" - assert staging_id.reason == f"Target branch deactivated by 'admin'." + assert staging_id.reason == "Target branch deactivated by 'admin'." 
p = pr_page(page, pr) - target = dict(zip( - (e.text for e in p.cssselect('dl.runbot-merge-fields dt')), - (p.cssselect('dl.runbot-merge-fields dd')) - ))['target'] - assert target.text_content() == 'other (inactive)' - assert target.get('class') == 'text-muted bg-warning' + [target] = p.cssselect('table tr.bg-info') + assert 'inactive' in target.classes + assert target[0].text_content() == "other" assert pr.comments == [ (users['reviewer'], "hansen r+"), seen(env, pr, users), - (users['user'], "Hey @%(user)s @%(reviewer)s the target branch 'other' has been disabled, you may want to close this PR." % users), + (users['user'], "@%(user)s @%(reviewer)s the target branch 'other' has been disabled, you may want to close this PR." % users), ] with repo: @@ -81,6 +96,11 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf assert pr_id.target == env['runbot_merge.branch'].search([('name', '=', 'other2')]) assert pr_id.staging_id + # staging of `pr` should have generated a staging branch + _ = repo.get_ref('heads/staging.other2') + # stagings should not need a tmp branch anymore, so this should not exist + with pytest.raises(AssertionError, match=r'Not Found'): + repo.get_ref('heads/tmp.other2') def test_new_pr_no_branch(env, project, make_repo, setreviewers, users): """ A new PR to an *unknown* branch should be ignored and warn @@ -92,6 +112,7 @@ def test_new_pr_no_branch(env, project, make_repo, setreviewers, users): 'status_ids': [(0, 0, {'context': 'status'})] }) setreviewers(*project.repo_ids) + env['runbot_merge.events_sources'].create({'repository': repo.name}) with repo: [m] = repo.make_commits(None, Commit('root', tree={'a': '1'}), ref='heads/master') @@ -125,6 +146,7 @@ def test_new_pr_disabled_branch(env, project, make_repo, setreviewers, users): 'active': False, }) setreviewers(*project.repo_ids) + env['runbot_merge.events_sources'].create({'repository': repo.name}) with repo: [m] = repo.make_commits(None, Commit('root', tree={'a': '1'}), ref='heads/master') diff --git a/runbot_merge/tests/test_multirepo.py b/runbot_merge/tests/test_multirepo.py index b94a1ec9..0efb7cfc 100644 --- a/runbot_merge/tests/test_multirepo.py +++ b/runbot_merge/tests/test_multirepo.py @@ -5,9 +5,11 @@ source branches). 
When preparing a staging, we simply want to ensure branch-matched PRs are staged concurrently in all repos """ -import json +import functools +import operator import time import xmlrpc.client +from itertools import repeat import pytest import requests @@ -17,43 +19,46 @@ from utils import seen, get_partner, pr_page, to_pr, Commit @pytest.fixture -def repo_a(project, make_repo, setreviewers): +def repo_a(env, project, make_repo, setreviewers): repo = make_repo('a') - r = project.env['runbot_merge.repository'].create({ + r = env['runbot_merge.repository'].create({ 'project_id': project.id, 'name': repo.name, - 'required_statuses': 'legal/cla,ci/runbot', + 'required_statuses': 'default', 'group_id': False, }) setreviewers(r) + env['runbot_merge.events_sources'].create({'repository': r.name}) return repo @pytest.fixture -def repo_b(project, make_repo, setreviewers): +def repo_b(env, project, make_repo, setreviewers): repo = make_repo('b') - r = project.env['runbot_merge.repository'].create({ + r = env['runbot_merge.repository'].create({ 'project_id': project.id, 'name': repo.name, - 'required_statuses': 'legal/cla,ci/runbot', + 'required_statuses': 'default', 'group_id': False, }) setreviewers(r) + env['runbot_merge.events_sources'].create({'repository': r.name}) return repo @pytest.fixture -def repo_c(project, make_repo, setreviewers): +def repo_c(env, project, make_repo, setreviewers): repo = make_repo('c') - r = project.env['runbot_merge.repository'].create({ + r = env['runbot_merge.repository'].create({ 'project_id': project.id, 'name': repo.name, - 'required_statuses': 'legal/cla,ci/runbot', + 'required_statuses': 'default', 'group_id': False, }) setreviewers(r) + env['runbot_merge.events_sources'].create({'repository': r.name}) return repo def make_pr(repo, prefix, trees, *, target='master', user, - statuses=(('ci/runbot', 'success'), ('legal/cla', 'success')), + statuses=(('default', 'success'),), reviewer): """ :type repo: fake_github.Repo @@ -81,27 +86,23 @@ def make_pr(repo, prefix, trees, *, target='master', user, pr.post_comment('hansen r+', reviewer) return pr -def make_branch(repo, name, message, tree, protect=True): - c = repo.make_commit(None, message, None, tree=tree) - repo.make_ref('heads/%s' % name, c) - if protect: - repo.protect(name) - return c -def test_stage_one(env, project, repo_a, repo_b, config): +@pytest.mark.parametrize('uniquifier', [False, True]) +def test_stage_one(env, project, repo_a, repo_b, config, uniquifier): """ First PR is non-matched from A => should not select PR from B """ + project.uniquifier = uniquifier project.batch_limit = 1 with repo_a: - make_branch(repo_a, 'master', 'initial', {'a': 'a_0'}) + repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master') pr_a = make_pr( repo_a, 'A', [{'a': 'a_1'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token']) with repo_b: - make_branch(repo_b, 'master', 'initial', {'a': 'b_0'}) + repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref='heads/master') pr_b = make_pr( repo_b, 'B', [{'a': 'b_1'}], user=config['role_user']['token'], @@ -113,7 +114,10 @@ def test_stage_one(env, project, repo_a, repo_b, config): assert pra_id.state == 'ready' assert pra_id.staging_id assert repo_a.commit('staging.master').message.startswith('commit_A_00') - assert repo_b.commit('staging.master').message.startswith('force rebuild') + if uniquifier: + assert repo_b.commit('staging.master').message.startswith('force rebuild') + else: + assert 
repo_b.commit('staging.master').message == 'initial' prb_id = to_pr(env, pr_b) assert prb_id.state == 'ready' @@ -126,14 +130,14 @@ def test_stage_match(env, project, repo_a, repo_b, config, page): project.batch_limit = 1 with repo_a: - make_branch(repo_a, 'master', 'initial', {'a': 'a_0'}) + repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master') prx_a = make_pr( repo_a, 'do-a-thing', [{'a': 'a_1'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'], ) with repo_b: - make_branch(repo_b, 'master', 'initial', {'a': 'b_0'}) + repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref='heads/master') prx_b = make_pr(repo_b, 'do-a-thing', [{'a': 'b_1'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'], @@ -159,7 +163,7 @@ def test_stage_match(env, project, repo_a, repo_b, config, page): assert get_related_pr_labels(pr_page(page, prx_a)) == [pr_b.display_name] assert get_related_pr_labels(pr_page(page, prx_b)) == [pr_a.display_name] with repo_a: - repo_a.post_status('staging.master', 'failure', 'legal/cla') + repo_a.post_status('staging.master', 'failure') env.run_crons() assert pr_a.state == 'error' @@ -173,8 +177,7 @@ def test_stage_match(env, project, repo_a, repo_b, config, page): assert pr_a.staging_id and pr_b.staging_id for repo in [repo_a, repo_b]: with repo: - repo.post_status('staging.master', 'success', 'legal/cla') - repo.post_status('staging.master', 'success', 'ci/runbot') + repo.post_status('staging.master', 'success') env.run_crons() assert pr_a.state == 'merged' assert pr_b.state == 'merged' @@ -182,7 +185,6 @@ def test_stage_match(env, project, repo_a, repo_b, config, page): assert 'Related: {}'.format(pr_b.display_name) in repo_a.commit('master').message assert 'Related: {}'.format(pr_a.display_name) in repo_b.commit('master').message - print(pr_a.batch_ids.read(['staging_id', 'prs'])) # check that related PRs *still* link to one another after merge assert get_related_pr_labels(pr_page(page, prx_a)) == [pr_b.display_name] assert get_related_pr_labels(pr_page(page, prx_b)) == [pr_a.display_name] @@ -195,8 +197,8 @@ def test_different_targets(env, project, repo_a, repo_b, config): 'branch_ids': [(0, 0, {'name': 'other'})] }) with repo_a: - make_branch(repo_a, 'master', 'initial', {'master': 'a_0'}) - make_branch(repo_a, 'other', 'initial', {'other': 'a_0'}) + repo_a.make_commits(None, Commit('initial', tree={'master': 'a_0'}), ref='heads/master') + repo_a.make_commits(None, Commit('initial', tree={'other': 'a_0'}), ref='heads/other') pr_a = make_pr( repo_a, 'do-a-thing', [{'mater': 'a_1'}], target='master', @@ -204,8 +206,8 @@ def test_different_targets(env, project, repo_a, repo_b, config): reviewer=config['role_reviewer']['token'], ) with repo_b: - make_branch(repo_b, 'master', 'initial', {'master': 'b_0'}) - make_branch(repo_b, 'other', 'initial', {'other': 'b_0'}) + repo_b.make_commits(None, Commit('initial', tree={'master': 'b_0'}), ref='heads/master') + repo_b.make_commits(None, Commit('initial', tree={'other': 'b_0'}), ref='heads/other') pr_b = make_pr( repo_b, 'do-a-thing', [{'other': 'b_1'}], target='other', @@ -228,8 +230,7 @@ def test_different_targets(env, project, repo_a, repo_b, config): for r in [repo_a, repo_b]: with r: - r.post_status('staging.master', 'success', 'legal/cla') - r.post_status('staging.master', 'success', 'ci/runbot') + r.post_status('staging.master', 'success') env.run_crons() assert pr_a.state == 'merged' @@ -243,7 +244,7 @@ def 
test_stage_different_statuses(env, project, repo_a, repo_b, config): }) with repo_a: - make_branch(repo_a, 'master', 'initial', {'a': 'a_0'}) + repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master') pr_a = make_pr( repo_a, 'do-a-thing', [{'a': 'a_1'}], user=config['role_user']['token'], @@ -251,17 +252,16 @@ def test_stage_different_statuses(env, project, repo_a, repo_b, config): ) repo_a.post_status(pr_a.head, 'success', 'foo/bar') with repo_b: - make_branch(repo_b, 'master', 'initial', {'a': 'b_0'}) + repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref='heads/master') [c] = repo_b.make_commits( 'heads/master', - repo_b.Commit('some_commit\n\nSee also %s#%d' % (repo_a.name, pr_a.number), tree={'a': 'b_1'}), + repo_b.Commit(f'some_commit\n\nSee also {repo_a.name}#{pr_a.number:d}', tree={'a': 'b_1'}), ref='heads/do-a-thing' ) pr_b = repo_b.make_pr( title="title", body="body", target='master', head='do-a-thing', token=config['role_user']['token']) - repo_b.post_status(c, 'success', 'ci/runbot') - repo_b.post_status(c, 'success', 'legal/cla') + repo_b.post_status(c, 'success') pr_b.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() # since the labels are the same but the statuses on pr_b are not the @@ -285,8 +285,7 @@ def test_stage_different_statuses(env, project, repo_a, repo_b, config): # do the actual merge to check for the Related header for repo in [repo_a, repo_b]: with repo: - repo.post_status('staging.master', 'success', 'legal/cla') - repo.post_status('staging.master', 'success', 'ci/runbot') + repo.post_status('staging.master', 'success') repo.post_status('staging.master', 'success', 'foo/bar') env.run_crons() @@ -315,14 +314,14 @@ def test_unmatch_patch(env, project, repo_a, repo_b, config): """ project.batch_limit = 1 with repo_a: - make_branch(repo_a, 'master', 'initial', {'a': 'a_0'}) + repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master') pr_a = make_pr( repo_a, 'patch-1', [{'a': 'a_1'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'], ) with repo_b: - make_branch(repo_b, 'master', 'initial', {'a': 'b_0'}) + repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref=f'heads/master') pr_b = make_pr( repo_b, 'patch-1', [{'a': 'b_1'}], user=config['role_user']['token'], @@ -342,16 +341,16 @@ def test_sub_match(env, project, repo_a, repo_b, repo_c, config): """ project.batch_limit = 1 with repo_a: # no pr here - make_branch(repo_a, 'master', 'initial', {'a': 'a_0'}) + repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master') with repo_b: - make_branch(repo_b, 'master', 'initial', {'a': 'b_0'}) + repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref='heads/master') pr_b = make_pr( repo_b, 'do-a-thing', [{'a': 'b_1'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'], ) with repo_c: - make_branch(repo_c, 'master', 'initial', {'a': 'c_0'}) + repo_c.make_commits(None, Commit('initial', tree={'a': 'c_0'}), ref='heads/master') pr_c = make_pr( repo_c, 'do-a-thing', [{'a': 'c_1'}], user=config['role_user']['token'], @@ -373,14 +372,30 @@ def test_sub_match(env, project, repo_a, repo_b, repo_c, config): a_staging = repo_a.commit('staging.master') b_staging = repo_b.commit('staging.master') c_staging = repo_c.commit('staging.master') - assert json.loads(st.heads) == { - repo_a.name: a_staging.id, - repo_a.name + '^': a_staging.parents[0], - repo_b.name: b_staging.id, - repo_b.name + 
'^': b_staging.id, - repo_c.name: c_staging.id, - repo_c.name + '^': c_staging.id, - } + assert sorted(st.head_ids.mapped('sha')) == sorted([ + a_staging.id, + b_staging.id, + c_staging.id, + ]) + s = env['runbot_merge.stagings'].for_heads( + a_staging.id, + b_staging.id, + c_staging.id, + ) + assert s == list(st.ids) + + assert sorted(st.commit_ids.mapped('sha')) == sorted([ + a_staging.parents[0], + b_staging.id, + c_staging.id, + ]) + s = env['runbot_merge.stagings'].for_commits( + a_staging.parents[0], + b_staging.id, + c_staging.id, + ) + assert s == list(st.ids) + def test_merge_fail(env, project, repo_a, repo_b, users, config): """ In a matched-branch scenario, if merging in one of the linked repos @@ -389,8 +404,8 @@ def test_merge_fail(env, project, repo_a, repo_b, users, config): project.batch_limit = 1 with repo_a, repo_b: - make_branch(repo_a, 'master', 'initial', {'a': 'a_0'}) - make_branch(repo_b, 'master', 'initial', {'a': 'b_0'}) + repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master') + repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref='heads/master') # first set of matched PRs pr1a = make_pr( @@ -455,14 +470,14 @@ def test_ff_fail(env, project, repo_a, repo_b, config): project.batch_limit = 1 with repo_a, repo_b: - root_a = make_branch(repo_a, 'master', 'initial', {'a': 'a_0'}) + [root_a] = repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master') make_pr( repo_a, 'do-a-thing', [{'a': 'a_1'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'], ) - make_branch(repo_b, 'master', 'initial', {'a': 'b_0'}) + repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref=f'heads/master') make_pr( repo_b, 'do-a-thing', [{'a': 'b_1'}], user=config['role_user']['token'], @@ -476,11 +491,9 @@ def test_ff_fail(env, project, repo_a, repo_b, config): assert repo_b.commit('heads/master').id == cn with repo_a, repo_b: - repo_a.post_status('heads/staging.master', 'success', 'ci/runbot') - repo_a.post_status('heads/staging.master', 'success', 'legal/cla') - repo_b.post_status('heads/staging.master', 'success', 'ci/runbot') - repo_b.post_status('heads/staging.master', 'success', 'legal/cla') - env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron') + repo_a.post_status('heads/staging.master', 'success') + repo_b.post_status('heads/staging.master', 'success') + env.run_crons(None) assert repo_b.commit('heads/master').id == cn,\ "B should still be at the conflicting commit" assert repo_a.commit('heads/master').id == root_a,\ @@ -498,7 +511,7 @@ class TestCompanionsNotReady: """ project.batch_limit = 1 with repo_a, repo_b: - make_branch(repo_a, 'master', 'initial', {'a': 'a_0'}) + repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master') # pr_a is born ready p_a = make_pr( repo_a, 'do-a-thing', [{'a': 'a_1'}], @@ -506,7 +519,7 @@ class TestCompanionsNotReady: reviewer=config['role_reviewer']['token'], ) - make_branch(repo_b, 'master', 'initial', {'a': 'b_0'}) + repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref='heads/master') p_b = make_pr( repo_b, 'do-a-thing', [{'a': 'b_1'}], user=config['role_user']['token'], @@ -553,21 +566,21 @@ class TestCompanionsNotReady: """ project.batch_limit = 1 with repo_a, repo_b, repo_c: - make_branch(repo_a, 'master', 'initial', {'f': 'a0'}) + repo_a.make_commits(None, Commit('initial', tree={'f': 'a0'}), ref='heads/master') pr_a = make_pr( repo_a, 'a-thing', [{'f': 'a1'}], 
user=config['role_user']['token'], reviewer=None, ) - make_branch(repo_b, 'master', 'initial', {'f': 'b0'}) + repo_b.make_commits(None, Commit('initial', tree={'f': 'b0'}), ref='heads/master') pr_b = make_pr( repo_b, 'a-thing', [{'f': 'b1'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'], ) - make_branch(repo_c, 'master', 'initial', {'f': 'c0'}) + repo_c.make_commits(None, Commit('initial', tree={'f': 'c0'}), ref='heads/master') pr_c = make_pr( repo_c, 'a-thing', [{'f': 'c1'}], user=config['role_user']['token'], @@ -593,21 +606,21 @@ class TestCompanionsNotReady: """ project.batch_limit = 1 with repo_a, repo_b, repo_c: - make_branch(repo_a, 'master', 'initial', {'f': 'a0'}) + repo_a.make_commits(None, Commit('initial', tree={'f': 'a0'}), ref='heads/master') pr_a = make_pr( repo_a, 'a-thing', [{'f': 'a1'}], user=config['role_user']['token'], reviewer=None, ) - make_branch(repo_b, 'master', 'initial', {'f': 'b0'}) + repo_b.make_commits(None, Commit('initial', tree={'f': 'b0'}), ref='heads/master') pr_b = make_pr( repo_b, 'a-thing', [{'f': 'b1'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'], ) - make_branch(repo_c, 'master', 'initial', {'f': 'c0'}) + repo_c.make_commits(None, Commit('initial', tree={'f': 'c0'}), ref='heads/master') pr_c = make_pr( repo_c, 'a-thing', [{'f': 'c1'}], user=config['role_user']['token'], @@ -619,19 +632,13 @@ class TestCompanionsNotReady: assert pr_b.comments == [ (users['reviewer'], 'hansen r+'), seen(env, pr_b, users), - (users['user'], "@%s @%s linked pull request(s) %s#%d not ready. Linked PRs are not staged until all of them are ready." % ( - users['user'], users['reviewer'], - repo_a.name, pr_a.number - )) + (users['user'], f"@{users['user']} @{users['reviewer']} linked pull request(s) {repo_a.name}#{pr_a.number} not ready. Linked PRs are not staged until all of them are ready.") ] assert pr_c.comments == [ (users['reviewer'], 'hansen r+'), seen(env, pr_c, users), (users['user'], - "@%s @%s linked pull request(s) %s#%d not ready. Linked PRs are not staged until all of them are ready." % ( - users['user'], users['reviewer'], - repo_a.name, pr_a.number - )) + f"@{users['user']} @{users['reviewer']} linked pull request(s) {repo_a.name}#{pr_a.number} not ready. 
Linked PRs are not staged until all of them are ready.") ] def test_other_failed(env, project, repo_a, repo_b, users, config): @@ -640,7 +647,7 @@ def test_other_failed(env, project, repo_a, repo_b, users, config): message """ with repo_a, repo_b: - make_branch(repo_a, 'master', 'initial', {'a': 'a_0'}) + repo_a.make_commits(None, Commit('initial', tree={'a': 'a_0'}), ref='heads/master') # pr_a is born ready pr_a = make_pr( repo_a, 'do-a-thing', [{'a': 'a_1'}], @@ -648,17 +655,15 @@ def test_other_failed(env, project, repo_a, repo_b, users, config): reviewer=config['role_reviewer']['token'], ) - make_branch(repo_b, 'master', 'initial', {'a': 'b_0'}) + repo_b.make_commits(None, Commit('initial', tree={'a': 'b_0'}), ref='heads/master') env.run_crons() pr = to_pr(env, pr_a) assert pr.staging_id with repo_a, repo_b: - repo_a.post_status('heads/staging.master', 'success', 'legal/cla') - repo_a.post_status('heads/staging.master', 'success', 'ci/runbot', target_url="http://example.org/a") - repo_b.post_status('heads/staging.master', 'success', 'legal/cla') - repo_b.post_status('heads/staging.master', 'failure', 'ci/runbot', target_url="http://example.org/b") + repo_a.post_status('heads/staging.master', 'success', target_url="http://example.org/a") + repo_b.post_status('heads/staging.master', 'failure', target_url="http://example.org/b") env.run_crons() sth = repo_b.commit('heads/staging.master').id @@ -667,7 +672,7 @@ def test_other_failed(env, project, repo_a, repo_b, users, config): assert pr_a.comments == [ (users['reviewer'], 'hansen r+'), seen(env, pr_a, users), - (users['user'], '@%s @%s staging failed: ci/runbot on %s (view more at http://example.org/b)' % ( + (users['user'], '@%s @%s staging failed: default on %s (view more at http://example.org/b)' % ( users['user'], users['reviewer'], sth )) @@ -681,8 +686,8 @@ class TestMultiBatches: project.batch_limit = 3 with repo_a, repo_b: - make_branch(repo_a, 'master', 'initial', {'a': 'a0'}) - make_branch(repo_b, 'master', 'initial', {'b': 'b0'}) + repo_a.make_commits(None, Commit('initial', tree={'a': 'a0'}), ref='heads/master') + repo_b.make_commits(None, Commit('initial', tree={'b': 'b0'}), ref='heads/master') prs = [( a and make_pr(repo_a, 'batch{}'.format(i), [{'a{}'.format(i): 'a{}'.format(i)}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token']), @@ -714,8 +719,8 @@ class TestMultiBatches: """ If a staging fails, it should get split properly across repos """ with repo_a, repo_b: - make_branch(repo_a, 'master', 'initial', {'a': 'a0'}) - make_branch(repo_b, 'master', 'initial', {'b': 'b0'}) + repo_a.make_commits(None, Commit('initial', tree={'a': 'a0'}), ref='heads/master') + repo_b.make_commits(None, Commit('initial', tree={'b': 'b0'}), ref='heads/master') prs = [( a and make_pr(repo_a, 'batch{}'.format(i), [{'a{}'.format(i): 'a{}'.format(i)}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token']), @@ -736,8 +741,7 @@ class TestMultiBatches: # mark b.staging as failed -> should create two splits with (0, 1) # and (2, 3, 4) and stage the first one with repo_b: - repo_b.post_status('heads/staging.master', 'success', 'legal/cla') - repo_b.post_status('heads/staging.master', 'failure', 'ci/runbot') + repo_b.post_status('heads/staging.master', 'failure') env.run_crons() assert not st0.active @@ -756,33 +760,46 @@ class TestMultiBatches: assert sp.mapped('batch_ids.prs') == \ prs[2][0] | prs[2][1] | prs[3][0] | prs[3][1] | prs[4][0] +@pytest.mark.usefixtures("reviewer_admin") def 
test_urgent(env, repo_a, repo_b, config):
-    """ Either PR of a co-dependent pair being p=0 leads to the entire pair
-    being prioritized
+    """ Either PR of a co-dependent pair being prioritized leads to the entire
+    pair being prioritized
     """
     with repo_a, repo_b:
-        make_branch(repo_a, 'master', 'initial', {'a0': 'a'})
-        make_branch(repo_b, 'master', 'initial', {'b0': 'b'})
+        repo_a.make_commits(None, Commit('initial', tree={'a0': 'a'}), ref='heads/master')
+        repo_b.make_commits(None, Commit('initial', tree={'b0': 'b'}), ref='heads/master')
         pr_a = make_pr(repo_a, 'batch', [{'a1': 'a'}, {'a2': 'a'}], user=config['role_user']['token'], reviewer=None, statuses=[])
         pr_b = make_pr(repo_b, 'batch', [{'b1': 'b'}, {'b2': 'b'}], user=config['role_user']['token'], reviewer=None, statuses=[])
-        pr_c = make_pr(repo_a, 'C', [{'c1': 'c', 'c2': 'c'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'],)
+        pr_c = make_pr(repo_a, 'C', [{'c1': 'c', 'c2': 'c'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'])

         pr_a.post_comment('hansen rebase-merge', config['role_reviewer']['token'])
-        pr_b.post_comment('hansen rebase-merge p=0', config['role_reviewer']['token'])
+        pr_b.post_comment('hansen rebase-merge alone skipchecks', config['role_reviewer']['token'])
     env.run_crons()
-    # should have batched pr_a and pr_b despite neither being reviewed or
-    # approved
-    p_a, p_b = to_pr(env, pr_a), to_pr(env, pr_b)
-    p_c = to_pr(env, pr_c)
+
+    p_a, p_b, p_c = to_pr(env, pr_a), to_pr(env, pr_b), to_pr(env, pr_c)
+    assert not p_a.blocked
+    assert not p_b.blocked
+
+    assert p_a.staging_id and p_b.staging_id and p_a.staging_id == p_b.staging_id,\
+        "a and b should be staged despite neither being reviewed nor approved"
     assert p_a.batch_id and p_b.batch_id and p_a.batch_id == p_b.batch_id,\
         "a and b should have been recognised as co-dependent"
     assert not p_c.staging_id

+    with repo_a:
+        pr_a.post_comment('hansen r-', config['role_reviewer']['token'])
+    env.run_crons()
+    assert not p_b.staging_id.active, "should be unstaged"
+    assert p_b.priority == 'alone', "priority should not be affected anymore"
+    assert not p_b.skipchecks, "r- of linked pr should have un-skipcheck-ed this one"
+    assert p_a.blocked
+    assert p_b.blocked
+
 class TestBlocked:
     def test_merge_method(self, env, repo_a, config):
         with repo_a:
-            make_branch(repo_a, 'master', 'initial', {'a0': 'a'})
+            repo_a.make_commits(None, Commit('initial', tree={'a0': 'a'}), ref='heads/master')
             pr = make_pr(repo_a, 'A', [{'a1': 'a'}, {'a2': 'a'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'],)

         env.run_crons()
@@ -796,33 +813,55 @@ class TestBlocked:

     def test_linked_closed(self, env, repo_a, repo_b, config):
         with repo_a, repo_b:
-            make_branch(repo_a, 'master', 'initial', {'a0': 'a'})
-            make_branch(repo_b, 'master', 'initial', {'b0': 'b'})
+            repo_a.make_commits(None, Commit('initial', tree={'a0': 'a'}), ref='heads/master')
+            repo_b.make_commits(None, Commit('initial', tree={'b0': 'b'}), ref='heads/master')

-            pr = make_pr(repo_a, 'xxx', [{'a1': 'a'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'],)
-            b = make_pr(repo_b, 'xxx', [{'b1': 'b'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'], statuses=[])
+            pr1_a = make_pr(repo_a, 'xxx', [{'a1': 'a'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'],)
+            pr1_b = make_pr(repo_b, 'xxx', [{'b1': 'b'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'], statuses=[])
         env.run_crons()

-        p = to_pr(env, pr)
-        assert p.blocked
-        with repo_b: b.close()
-        # FIXME: find a way for PR.blocked to depend on linked PR somehow so this isn't needed
-        p.invalidate_cache(['blocked'], [p.id])
-        assert not p.blocked
+        head_a = repo_a.commit('master').id
+        head_b = repo_b.commit('master').id
+        pr1_a_id = to_pr(env, pr1_a)
+        pr1_b_id = to_pr(env, pr1_b)
+        assert pr1_a_id.blocked
+        with repo_b: pr1_b.close()
+        assert not pr1_a_id.blocked
+        assert len(pr1_a_id.batch_id.all_prs) == 2
+        assert pr1_a_id.state == 'ready'
+        assert pr1_b_id.state == 'closed'
+        env.run_crons()
+        assert pr1_a_id.staging_id
+        with repo_a, repo_b:
+            repo_a.post_status('staging.master', 'success')
+            repo_b.post_status('staging.master', 'success')
+        env.run_crons()
+        assert pr1_a_id.state == 'merged'
+        assert pr1_a_id.batch_id.merge_date
+        assert repo_a.commit('master').id != head_a, \
+            "the master of repo A should be updated"
+        assert repo_b.commit('master').id == head_b, \
+            "the master of repo B should not be updated"
+
+        with repo_a:
+            pr2_a = make_pr(repo_a, "xxx", [{'x': 'x'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'])
+        env.run_crons()
+        pr2_a_id = to_pr(env, pr2_a)
+        assert pr2_a_id.batch_id != pr1_a_id.batch_id
+        assert pr2_a_id.label == pr1_a_id.label
+        assert len(pr2_a_id.batch_id.all_prs) == 1

     def test_linked_merged(self, env, repo_a, repo_b, config):
         with repo_a, repo_b:
-            make_branch(repo_a, 'master', 'initial', {'a0': 'a'})
-            make_branch(repo_b, 'master', 'initial', {'b0': 'b'})
+            repo_a.make_commits(None, Commit('initial', tree={'a0': 'a'}), ref='heads/master')
+            repo_b.make_commits(None, Commit('initial', tree={'b0': 'b'}), ref='heads/master')

             b = make_pr(repo_b, 'xxx', [{'b1': 'b'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'],)
         env.run_crons()
         # stage b and c
         with repo_a, repo_b:
-            repo_a.post_status('heads/staging.master', 'success', 'legal/cla')
-            repo_a.post_status('heads/staging.master', 'success', 'ci/runbot')
-            repo_b.post_status('heads/staging.master', 'success', 'legal/cla')
-            repo_b.post_status('heads/staging.master', 'success', 'ci/runbot')
+            repo_a.post_status('heads/staging.master', 'success')
+            repo_b.post_status('heads/staging.master', 'success')
         env.run_crons()
         # merge b and c
         assert to_pr(env, b).state == 'merged'
@@ -833,15 +872,16 @@ class TestBlocked:
         p = to_pr(env, pr)
         assert not p.blocked

+    @pytest.mark.usefixtures("reviewer_admin")
     def test_linked_unready(self, env, repo_a, repo_b, config):
         """ Create a PR A linked to a non-ready PR B,
         * A is blocked by default
-        * A is not blocked if A.p=0
-        * A is not blocked if B.p=0
+        * A is not blocked if A.skipchecks
+        * A is not blocked if B.skipchecks
         """
         with repo_a, repo_b:
-            make_branch(repo_a, 'master', 'initial', {'a0': 'a'})
-            make_branch(repo_b, 'master', 'initial', {'b0': 'b'})
+            repo_a.make_commits(None, Commit('initial', tree={'a0': 'a'}), ref='heads/master')
+            repo_b.make_commits(None, Commit('initial', tree={'b0': 'b'}), ref='heads/master')

             a = make_pr(repo_a, 'xxx', [{'a1': 'a'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'],)
             b = make_pr(repo_b, 'xxx', [{'b1': 'b'}], user=config['role_user']['token'], reviewer=config['role_reviewer']['token'], statuses=[])
@@ -850,13 +890,11 @@ class TestBlocked:
         pr_a = to_pr(env, a)
         assert pr_a.blocked
-        with repo_a: a.post_comment('hansen p=0', config['role_reviewer']['token'])
+        with repo_a: a.post_comment('hansen 
skipchecks', config['role_reviewer']['token']) assert not pr_a.blocked + pr_a.skipchecks = False - with repo_a: a.post_comment('hansen p=2', config['role_reviewer']['token']) - assert pr_a.blocked - - with repo_b: b.post_comment('hansen p=0', config['role_reviewer']['token']) + with repo_b: b.post_comment('hansen skipchecks', config['role_reviewer']['token']) assert not pr_a.blocked def test_different_branches(env, project, repo_a, repo_b, config): @@ -867,9 +905,9 @@ def test_different_branches(env, project, repo_a, repo_b, config): env['runbot_merge.repository'].search([('name', '=', repo_b.name)])\ .branch_filter = '[("name", "=", "master")]' with repo_a, repo_b: - make_branch(repo_a, 'dev', 'initial', {'a': '0'}) - make_branch(repo_a, 'master', 'initial', {'b': '0'}) - make_branch(repo_b, 'master', 'initial', {'b': '0'}) + repo_a.make_commits(None, Commit('initial', tree={'a': '0'}), ref='heads/dev') + repo_a.make_commits(None, Commit('initial', tree={'b': '0'}), ref='heads/master') + repo_b.make_commits(None, Commit('initial', tree={'b': '0'}), ref='heads/master') pr_a = make_pr( repo_a, 'xxx', [{'a': '1'}], @@ -881,8 +919,7 @@ def test_different_branches(env, project, repo_a, repo_b, config): with repo_a: pr_a.post_comment('hansen r+', config['role_reviewer']['token']) - repo_a.post_status('heads/staging.dev', 'success', 'legal/cla') - repo_a.post_status('heads/staging.dev', 'success', 'ci/runbot') + repo_a.post_status('heads/staging.dev', 'success') env.run_crons() assert to_pr(env, pr_a).state == 'merged' @@ -903,6 +940,7 @@ class TestSubstitutions: 'repo_ids': [(0, 0, {'name': 'xxx/xxx'})], 'branch_ids': [(0, 0, {'name': 'master'})] }) + env['runbot_merge.events_sources'].create({'repository': 'xxx/xxx'}) r = p.repo_ids # replacement pattern, pr label, stored label cases = [ @@ -961,41 +999,32 @@ class TestSubstitutions: repo_b_id.substitutions = r"/.+:/%s:/" % repo_a.owner with repo_a: - make_branch(repo_a, 'master', 'initial', {'a': '0'}) + repo_a.make_commits(None, Commit('initial', tree={'a': '0'}), ref='heads/master') with repo_b: - make_branch(repo_b, 'master', 'initial', {'b': '0'}) + repo_b.make_commits(None, Commit('initial', tree={'b': '0'}), ref='heads/master') # policy is that repo_a PRs are created in the same repo while repo_b PRs # are created in personal forks with repo_a: repo_a.make_commits('master', repo_a.Commit('bop', tree={'a': '1'}), ref='heads/abranch') pra = repo_a.make_pr(target='master', head='abranch') - b_fork = repo_b.fork() - with b_fork, repo_b: + with repo_b, repo_b.fork() as b_fork: b_fork.make_commits('master', b_fork.Commit('pob', tree={'b': '1'}), ref='heads/abranch') prb = repo_b.make_pr( title="a pr", target='master', head='%s:abranch' % b_fork.owner ) - pra_id = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo_a.name), - ('number', '=', pra.number) - ]) - prb_id = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo_b.name), - ('number', '=', prb.number) - ]) + pra_id = to_pr(env, pra) + prb_id = to_pr(env, prb) assert pra_id.label.endswith(':abranch') assert prb_id.label.endswith(':abranch') with repo_a, repo_b: - repo_a.post_status(pra.head, 'success', 'legal/cla') - repo_a.post_status(pra.head, 'success', 'ci/runbot') + repo_a.post_status(pra.head, 'success') pra.post_comment('hansen r+', config['role_reviewer']['token']) - repo_b.post_status(prb.head, 'success', 'legal/cla') - repo_b.post_status(prb.head, 'success', 'ci/runbot') + repo_b.post_status(prb.head, 'success') 
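+        # NB: these tests now run against a single required status, so one
+        # success per head replaces the old legal/cla + ci/runbot pair; the
+        # explicit equivalent would presumably be
+        #     repo_b.post_status(prb.head, 'success', 'default')
+        # (assuming the helper's `context` argument now defaults to 'default',
+        # cf the `defaultstatuses` marker)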
prb.post_comment('hansen r+', config['role_reviewer']['token'])
     env.run_crons()
@@ -1035,6 +1064,7 @@ def test_multi_project(env, make_repo, setreviewers, users, config,
         'branch_ids': [(0, 0, {'name': 'default'})],
     })
     setreviewers(*p1.repo_ids)
+    env['runbot_merge.events_sources'].create([{'repository': r1.name}])

     r2 = make_repo('repo_b')
     with r2:
@@ -1055,6 +1085,7 @@ def test_multi_project(env, make_repo, setreviewers, users, config,
         'branch_ids': [(0, 0, {'name': 'default'})],
     })
     setreviewers(*p2.repo_ids)
+    env['runbot_merge.events_sources'].create([{'repository': r2.name}])

     assert r1_dev.owner == r2_dev.owner
@@ -1078,13 +1109,6 @@ def test_multi_project(env, make_repo, setreviewers, users, config,
     pr1_id = to_pr(env, pr1)
     pr2_id = to_pr(env, pr2)

-    print(
-        pr1.repo.name, pr1.number, pr1_id.display_name, pr1_id.label,
-        '\n',
-        pr2.repo.name, pr2.number, pr2_id.display_name, pr2_id.label,
-        flush=True,
-    )
-
     assert pr1_id.state == 'ready' and not pr1_id.blocked
     assert pr2_id.state == 'validated'
@@ -1093,11 +1117,9 @@ def test_multi_project(env, make_repo, setreviewers, users, config,

     assert pr1.comments == [
         (users['reviewer'], 'hansen r+'),
-        (users['user'], f'[Pull request status dashboard]({pr1_id.url}).'),
-    ]
-    assert pr2.comments == [
-        (users['user'], f'[Pull request status dashboard]({pr2_id.url}).'),
+        seen(env, pr1, users),
     ]
+    assert pr2.comments == [seen(env, pr2, users)]

 def test_freeze_complete(env, project, repo_a, repo_b, repo_c, users, config):
     """ Tests the freeze wizard feature (aside from the UI):
@@ -1115,6 +1137,8 @@ def test_freeze_complete(env, project, repo_a, repo_b, repo_c, users, config):
     * check that freeze goes through
     * check that reminder is shown
     * check that new branches are created w/ correct parent & commit info
+    * check that the PRs (freeze and bump) are part of synthetic stagings so
+      they're correctly accounted for in the change history
     """
     project.freeze_reminder = "Don't forget to like and subscribe"
@@ -1163,26 +1187,22 @@ def test_freeze_complete(env, project, repo_a, repo_b, repo_c, users, config):
     ]})
     r = w_id.action_freeze()
     assert r == w, "the freeze is not ready so the wizard should redirect to itself"
-    owner = repo_c.owner
     assert w_id.errors == f"""\
-* All release PRs must have the same label, found '{owner}:release-1.1, {owner}:whocares'.
+* All release PRs must have the same label, found '{pr_rel_c.user}:release-1.1, {pr_other.user}:whocares'.
* 2 required PRs not ready.""" w_id.release_pr_ids[-1].pr_id = release_prs[repo_c.name].id with repo_a: pr_required_a.post_comment('hansen r+', config['role_reviewer']['token']) - repo_a.post_status('apr', 'success', 'ci/runbot') - repo_a.post_status('apr', 'success', 'legal/cla') + repo_a.post_status(pr_required_a.head, 'success') with repo_c: pr_required_c.post_comment('hansen r+', config['role_reviewer']['token']) - repo_c.post_status('cpr', 'success', 'ci/runbot') - repo_c.post_status('cpr', 'success', 'legal/cla') + repo_c.post_status(pr_required_c.head, 'success') env.run_crons() for repo in [repo_a, repo_b, repo_c]: with repo: - repo.post_status('staging.master', 'success', 'ci/runbot') - repo.post_status('staging.master', 'success', 'legal/cla') + repo.post_status('staging.master', 'success') env.run_crons() assert to_pr(env, pr_required_a).state == 'merged' @@ -1205,22 +1225,35 @@ def test_freeze_complete(env, project, repo_a, repo_b, repo_c, users, config): assert r['res_model'] == 'runbot_merge.project' assert r['res_id'] == project.id + release_pr_ids = functools.reduce(operator.add, release_prs.values()) # stuff that's done directly - for pr_id in release_prs.values(): - assert pr_id.state == 'merged' + assert all(pr_id.state == 'merged' for pr_id in release_pr_ids) assert pr_bump_id.state == 'merged' + assert pr_bump_id.commits_map != '{}' + + assert len(release_pr_ids.batch_id) == 1 + assert release_pr_ids.batch_id.merge_date + assert release_pr_ids.batch_id.staging_ids.target.name == '1.1' + assert release_pr_ids.batch_id.staging_ids.state == 'success' + + assert pr_bump_id.batch_id.merge_date + assert pr_bump_id.batch_id.staging_ids.target.name == 'master' + assert pr_bump_id.batch_id.staging_ids.state == 'success' # stuff that's behind a cron env.run_crons() + # check again to be sure + assert all(pr_id.state == 'merged' for pr_id in release_pr_ids) + assert pr_bump_id.state == 'merged' + assert pr_rel_a.state == "closed" assert pr_rel_a.base['ref'] == '1.1' assert pr_rel_b.state == "closed" assert pr_rel_b.base['ref'] == '1.1' assert pr_rel_c.state == "closed" assert pr_rel_c.base['ref'] == '1.1' - for pr_id in release_prs.values(): - assert pr_id.target.name == '1.1' + assert all(pr_id.target.name == '1.1' for pr_id in release_pr_ids) assert pr_bump_a.state == 'closed' assert pr_bump_a.base['ref'] == 'master' @@ -1247,12 +1280,12 @@ def test_freeze_complete(env, project, repo_a, repo_b, repo_c, users, config): c_b = repo_b.commit('1.1') assert c_b.message.startswith('Release 1.1 (B)') - assert repo_b.read_tree(c_b) == {'f': '1', 'version': ''} + assert repo_b.read_tree(c_b) == {'f': '1', 'version': '1.1'} assert c_b.parents[0] == master_head_b c_c = repo_c.commit('1.1') assert c_c.message.startswith('Release 1.1 (C)') - assert repo_c.read_tree(c_c) == {'f': '2', 'version': ''} + assert repo_c.read_tree(c_c) == {'f': '2', 'version': '1.1'} assert repo_c.commit(c_c.parents[0]).parents[0] == master_head_c @@ -1263,49 +1296,55 @@ def setup_mess(repo_a, repo_b, repo_c): [root, _] = r.make_commits( None, Commit('base', tree={'version': '', 'f': '0'}), - Commit('release 1.0', tree={'version': '1.0'} if r is repo_a else None), + Commit('release 1.0', tree={'version': '1.0'}), ref='heads/1.0' ) master_heads.extend(r.make_commits(root, Commit('other', tree={'f': '1'}), ref='heads/master')) + + a_fork = repo_a.fork() + b_fork = repo_b.fork() + c_fork = repo_c.fork() + assert a_fork.owner == b_fork.owner == c_fork.owner + owner = a_fork.owner # have 2 PRs required for the freeze - 
with repo_a: - repo_a.make_commits(master_heads[0], Commit('super important file', tree={'g': 'x'}), ref='heads/apr') - pr_required_a = repo_a.make_pr(target='master', head='apr') - with repo_c: - repo_c.make_commits(master_heads[2], Commit('update thing', tree={'f': '2'}), ref='heads/cpr') - pr_required_c = repo_c.make_pr(target='master', head='cpr') + with repo_a, a_fork: + a_fork.make_commits(master_heads[0], Commit('super important file', tree={'g': 'x'}), ref='heads/apr') + pr_required_a = repo_a.make_pr(target='master', head=f'{owner}:apr', title="xxx") + with repo_c, c_fork: + c_fork.make_commits(master_heads[2], Commit('update thing', tree={'f': '2'}), ref='heads/cpr') + pr_required_c = repo_c.make_pr(target='master', head=f'{owner}:cpr', title="yyy") # have 3 release PRs, only the first one updates the tree (version file) - with repo_a: - repo_a.make_commits( + with repo_a, a_fork: + a_fork.make_commits( master_heads[0], Commit('Release 1.1 (A)', tree={'version': '1.1'}), ref='heads/release-1.1' ) - pr_rel_a = repo_a.make_pr(target='master', head='release-1.1') - with repo_b: - repo_b.make_commits( + pr_rel_a = repo_a.make_pr(target='master', head=f'{owner}:release-1.1', title="zzz") + with repo_b, b_fork: + b_fork.make_commits( master_heads[1], - Commit('Release 1.1 (B)', tree=None), + Commit('Release 1.1 (B)', tree={'version': '1.1'}), ref='heads/release-1.1' ) - pr_rel_b = repo_b.make_pr(target='master', head='release-1.1') - with repo_c: - repo_c.make_commits(master_heads[2], Commit("Some change", tree={'a': '1'}), ref='heads/whocares') - pr_other = repo_c.make_pr(target='master', head='whocares') - repo_c.make_commits( + pr_rel_b = repo_b.make_pr(target='master', head=f'{owner}:release-1.1', title="000") + with repo_c, c_fork: + c_fork.make_commits(master_heads[2], Commit("Some change", tree={'a': '1'}), ref='heads/whocares') + pr_other = repo_c.make_pr(target='master', head=f'{owner}:whocares', title="111") + c_fork.make_commits( master_heads[2], - Commit('Release 1.1 (C)', tree=None), + Commit('Release 1.1 (C)', tree={'version': '1.1'}), ref='heads/release-1.1' ) - pr_rel_c = repo_c.make_pr(target='master', head='release-1.1') + pr_rel_c = repo_c.make_pr(target='master', head=f'{owner}:release-1.1', title="222") # have one bump PR on repo A - with repo_a: - repo_a.make_commits( + with repo_a, a_fork: + a_fork.make_commits( master_heads[0], Commit("Bump A", tree={'version': '1.2-alpha'}), ref='heads/bump-1.1', ) - pr_bump_a = repo_a.make_pr(target='master', head='bump-1.1') + pr_bump_a = repo_a.make_pr(target='master', head=f'{owner}:bump-1.1', title="333") return master_heads, (pr_required_a, None, pr_required_c), (pr_rel_a, pr_rel_b, pr_rel_c), pr_bump_a, pr_other def test_freeze_subset(env, project, repo_a, repo_b, repo_c, users, config): @@ -1422,7 +1461,8 @@ def test_freeze_conflict(env, project, repo_a, repo_b, repo_c, users, config): # create conflicting branch with repo_c: - repo_c.make_ref('heads/1.1', heads[2]) + [c] = repo_c.make_commits(heads[2], Commit("exists", tree={'version': ''})) + repo_c.make_ref('heads/1.1', c) # actually perform the freeze with pytest.raises(xmlrpc.client.Fault) as e: @@ -1436,3 +1476,63 @@ def test_freeze_conflict(env, project, repo_a, repo_b, repo_c, users, config): with pytest.raises(AssertionError) as e: repo_b.get_ref('heads/1.1') assert e.value.args[0].startswith("Not Found") + +def test_cancel_staging(env, project, repo_a, repo_b, users, config): + """If a batch is flagged as staging cancelling (from any PR), the staging + should 
get cancelled if and when the batch transitions to unblocked + """ + with repo_a, repo_b: + repo_a.make_commits(None, Commit('initial', tree={'a': '1'}), ref='heads/master') + repo_b.make_commits(None, Commit('initial', tree={'b': '1'}), ref='heads/master') + + pr_a = make_pr(repo_a, 'batch', [{'a': '2'}], user=config['role_user']['token'], statuses=[], reviewer=None) + pr_b = make_pr(repo_b, 'batch', [{'b': '2'}], user=config['role_user']['token'], statuses=[], reviewer=None) + pr_lone = make_pr( + repo_a, + "C", + [{'c': '1'}], + user=config['role_user']['token'], + reviewer=config['role_reviewer']['token'], + ) + env.run_crons() + + a_id, b_id, lone_id = map(to_pr, repeat(env), [pr_a, pr_b, pr_lone]) + assert lone_id.staging_id + st = lone_id.staging_id + + with repo_a: + pr_a.post_comment("hansen cancel=staging", config['role_reviewer']['token']) + assert a_id.state == 'opened' + assert a_id.cancel_staging + assert b_id.cancel_staging + assert lone_id.staging_id == st + with repo_a: + pr_a.post_comment('hansen r+', config['role_reviewer']['token']) + assert a_id.state == 'approved' + assert lone_id.staging_id == st + with repo_a: + repo_a.post_status(a_id.head, 'success') + env.run_crons() + assert a_id.state == 'ready' + assert lone_id.staging_id == st + + assert b_id.state == 'opened' + with repo_b: + pr_b.post_comment('hansen r+', config['role_reviewer']['token']) + assert b_id.state == 'approved' + assert lone_id.staging_id == st + with repo_b: + repo_b.post_status(b_id.head, 'success') + assert b_id.state == 'approved' + assert lone_id.staging_id == st + env.run_crons() + assert b_id.state == 'ready' + # should have cancelled the staging, picked a and b, and re-staged the + # entire thing + assert lone_id.staging_id != st + + assert len({ + lone_id.staging_id.id, + a_id.staging_id.id, + b_id.staging_id.id, + }) == 1 diff --git a/runbot_merge/tests/test_oddities.py b/runbot_merge/tests/test_oddities.py index f0a501ce..7f353619 100644 --- a/runbot_merge/tests/test_oddities.py +++ b/runbot_merge/tests/test_oddities.py @@ -1,3 +1,5 @@ +from operator import itemgetter + import requests from utils import Commit, to_pr, seen @@ -50,7 +52,7 @@ def test_name_search(env): prs = PRs.create({**baseline, 'number': 1964, 'label': 'victor:thump', 'head': 'a', 'message': 'x'})\ | PRs.create({**baseline, 'number': 1959, 'label': 'marcus:frankenstein', 'head': 'b', 'message': 'y'})\ | PRs.create({**baseline, 'number': 1969, 'label': 'victor:patch-1', 'head': 'c', 'message': 'z'}) - pr0, pr1, pr2 = prs.name_get() + pr0, pr1, pr2 = [[pr.id, pr.display_name] for pr in prs] assert PRs.name_search('1964') == [pr0] assert PRs.name_search('1969') == [pr2] @@ -96,7 +98,7 @@ def test_unreviewer(env, project, port): assert p.review_rights == env['res.partner.review'] -def test_staging_post_update(env, project, make_repo, setreviewers, users, config): +def test_staging_post_update(env, repo, users, config): """Because statuses come from commits, it's possible to update the commits of a staging after that staging has completed (one way or the other), either by sending statuses directly (e.g. rebuilding, for non-deterministic errors) @@ -105,21 +107,13 @@ def test_staging_post_update(env, project, make_repo, setreviewers, users, confi This makes post-mortem analysis quite confusing, so stagings should "lock in" their statuses once they complete. 
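+
+    (Sketch of the behaviour exercised below: once the staging has reached a
+    final state such as `failure`, a later `post_status` on its head must
+    leave `staging_id.statuses` unchanged.)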
""" - repo = make_repo('repo') - project.write({'repo_ids': [(0, 0, { - 'name': repo.name, - 'group_id': False, - 'required_statuses': 'legal/cla,ci/runbot' - })]}) - setreviewers(*project.repo_ids) with repo: [m] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), ref='heads/master') repo.make_commits(m, Commit('thing', tree={'m': 'c'}), ref='heads/other') pr = repo.make_pr(target='master', head='other') - repo.post_status(pr.head, 'success', 'ci/runbot') - repo.post_status(pr.head, 'success', 'legal/cla') + repo.post_status(pr.head, 'success') pr.post_comment('hansen r+ rebase-merge', config['role_reviewer']['token']) env.run_crons() pr_id = to_pr(env, pr) @@ -128,18 +122,244 @@ def test_staging_post_update(env, project, make_repo, setreviewers, users, confi staging_head = repo.commit('staging.master') with repo: - repo.post_status(staging_head, 'failure', 'ci/runbot') + repo.post_status(staging_head, 'failure') env.run_crons() assert pr_id.state == 'error' assert staging_id.state == 'failure' assert staging_id.statuses == [ - [repo.name, 'ci/runbot', 'failure', ''], + [repo.name, 'default', 'failure', ''], ] with repo: - repo.post_status(staging_head, 'success', 'ci/runbot') + repo.post_status(staging_head, 'success') env.run_crons() assert staging_id.state == 'failure' assert staging_id.statuses == [ - [repo.name, 'ci/runbot', 'failure', ''], + [repo.name, 'default', 'failure', ''], ] + +def test_merge_empty_commits(env, repo, users, config): + """The mergebot should allow merging already-empty commits. + """ + with repo: + [m] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), ref='heads/master') + + repo.make_commits(m, Commit('thing1', tree={}), ref='heads/other1') + pr1 = repo.make_pr(target='master', head='other1') + repo.post_status(pr1.head, 'success') + pr1.post_comment('hansen r+', config['role_reviewer']['token']) + + repo.make_commits(m, Commit('thing2', tree={}), ref='heads/other2') + pr2 = repo.make_pr(target='master', head='other2') + repo.post_status(pr2.head, 'success') + pr2.post_comment('hansen r+ rebase-ff', config['role_reviewer']['token']) + env.run_crons() + pr1_id = to_pr(env, pr1) + pr2_id = to_pr(env, pr2) + assert pr1_id.staging_id and pr2_id.staging_id + + with repo: + repo.post_status('staging.master', 'success') + env.run_crons() + + assert pr1_id.state == pr2_id.state == 'merged' + + # log is most-recent-first (?) 
+ commits = list(repo.log('master')) + head = repo.commit(commits[0]['sha']) + assert repo.read_tree(head) == {'m': 'm'} + + assert commits[0]['commit']['message'].startswith('thing2') + assert commits[1]['commit']['message'].startswith('thing1') + assert commits[2]['commit']['message'] == 'initial' + + +def test_merge_emptying_commits(env, repo, users, config): + """The mergebot should *not* allow merging non-empty commits which become + empty as part of the staging (rebasing) + """ + with repo: + [m, _] = repo.make_commits( + None, + Commit('initial', tree={'m': 'm'}), + Commit('second', tree={'m': 'c'}), + ref='heads/master', + ) + + [c1] = repo.make_commits(m, Commit('thing', tree={'m': 'c'}), ref='heads/branch1') + pr1 = repo.make_pr(target='master', head='branch1') + repo.post_status(pr1.head, 'success') + pr1.post_comment('hansen r+ rebase-ff', config['role_reviewer']['token']) + + [_, c2] = repo.make_commits( + m, + Commit('thing1', tree={'c': 'c'}), + Commit('thing2', tree={'m': 'c'}), + ref='heads/branch2', + ) + pr2 = repo.make_pr(target='master', head='branch2') + repo.post_status(pr2.head, 'success') + pr2.post_comment('hansen r+ rebase-ff', config['role_reviewer']['token']) + + repo.make_commits( + m, + Commit('thing1', tree={'m': 'x'}), + Commit('thing2', tree={'m': 'c'}), + ref='heads/branch3', + ) + pr3 = repo.make_pr(target='master', head='branch3') + repo.post_status(pr3.head, 'success') + pr3.post_comment('hansen r+ squash', config['role_reviewer']['token']) + env.run_crons() + + ping = f"@{users['user']} @{users['reviewer']}" + # check that first / sole commit emptying is caught + pr1_id = to_pr(env, pr1) + assert not pr1_id.staging_id + assert pr1.comments[3:] == [ + (users['user'], f"{ping} unable to stage: commit {c1} results in an empty tree when merged, it is likely a duplicate of a merged commit, rebase and remove.") + ] + assert pr1_id.error + assert pr1_id.state == 'error' + + # check that followup commit emptying is caught + pr2_id = to_pr(env, pr2) + assert not pr2_id.staging_id + assert pr2.comments[3:] == [ + (users['user'], f"{ping} unable to stage: commit {c2} results in an empty tree when merged, it is likely a duplicate of a merged commit, rebase and remove.") + ] + assert pr2_id.error + assert pr2_id.state == 'error' + + # check that emptied squashed pr is caught + pr3_id = to_pr(env, pr3) + assert not pr3_id.staging_id + assert pr3.comments[3:] == [ + (users['user'], f"{ping} unable to stage: results in an empty tree when merged, might be the duplicate of a merged PR.") + ] + assert pr3_id.error + assert pr3_id.state == 'error' + + # ensure the PR does not get re-staged since it's the first of the staging + # (it's the only one) + env.run_crons() + assert pr1.comments[3:] == [ + (users['user'], f"{ping} unable to stage: commit {c1} results in an empty tree when merged, it is likely a duplicate of a merged commit, rebase and remove.") + ] + assert len(pr2.comments) == 4 + assert len(pr3.comments) == 4 + +def test_force_ready(env, repo, config): + with repo: + [m] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), ref="heads/master") + + repo.make_commits(m, Commit('first', tree={'m': 'c1'}), ref="heads/other") + pr = repo.make_pr(target='master', head='other') + env.run_crons() + + pr_id = to_pr(env, pr) + pr_id.skipchecks = True + + assert pr_id.state == 'ready' + assert pr_id.status == 'pending' + reviewer = env['res.users'].browse([env._uid]).partner_id + assert pr_id.reviewed_by == reviewer + +def test_help(env, repo, config, users, 
partners):
+    with repo:
+        [m] = repo.make_commits(None, Commit('initial', tree={'m': 'm'}), ref="heads/master")
+
+        repo.make_commits(m, Commit('first', tree={'m': 'c1'}), ref="heads/other")
+        pr = repo.make_pr(target='master', head='other')
+    env.run_crons()
+
+    for role in ['reviewer', 'self_reviewer', 'user', 'other']:
+        v = config[f'role_{role}']
+        with repo:
+            pr.post_comment("hansen help", v['token'])
+    with repo:
+        pr.post_comment("hansen r+ help", config['role_reviewer']['token'])
+
+    assert not partners['reviewer'].user_ids, "the reviewer should not be an internal user"
+
+    group_internal = env.ref("base.group_user")
+    group_admin = env.ref("runbot_merge.group_admin")
+    env['res.users'].create({
+        'partner_id': partners['reviewer'].id,
+        'login': 'reviewer',
+        'groups_id': [(4, group_internal.id, 0), (4, group_admin.id, 0)],
+    })
+
+    with repo:
+        pr.post_comment("hansen help", config['role_reviewer']['token'])
+    env.run_crons()
+
+    assert pr.comments == [
+        seen(env, pr, users),
+        (users['reviewer'], "hansen help"),
+        (users['self_reviewer'], "hansen help"),
+        (users['user'], "hansen help"),
+        (users['other'], "hansen help"),
+        (users['reviewer'], "hansen r+ help"),
+        (users['reviewer'], "hansen help"),
+        (users['user'], REVIEWER.format(user=users['reviewer'], skip="")),
+        (users['user'], RANDO.format(user=users['self_reviewer'])),
+        (users['user'], AUTHOR.format(user=users['user'])),
+        (users['user'], RANDO.format(user=users['other'])),
+        (users['user'],
+         REVIEWER.format(user=users['reviewer'], skip='')
+         + "\n\nWarning: in invoking help, every other command has been ignored."),
+        (users['user'], REVIEWER.format(
+            user=users['reviewer'],
+            skip='|`skipchecks`|bypasses both statuses and review|\n',
+        )),
+    ]
+
+REVIEWER = """\
+Currently available commands for @{user}:
+
+|command||
+|-|-|
+|`help`|displays this help|
+|`r(eview)+`|approves the PR, if it's a forwardport also approves all non-detached parents|
+|`r(eview)=<number>`|only approves the specified parents|
+|`fw=no`|does not forward-port this PR|
+|`fw=default`|forward-ports this PR normally|
+|`fw=skipci`|does not wait for a forward-port's statuses to succeed before creating the next one|
+|`up to <branch>`|only ports this PR forward to the specified branch (included)|
+|`merge`|integrate the PR with a simple merge commit, using the PR description as message|
+|`rebase-merge`|rebases the PR on top of the target branch then integrates with a merge commit, using the PR description as message|
+|`rebase-ff`|rebases the PR on top of the target branch, then fast-forwards|
+|`squash`|squashes the PR as a single commit on the target branch, using the PR description as message|
+|`delegate+`|grants approval rights to the PR author|
+|`delegate=<...>`|grants approval rights on this PR to the specified github users|
+|`default`|stages the PR normally|
+|`priority`|tries to stage this PR first, then adds `default` PRs if the staging has room|
+|`alone`|stages this PR only with other PRs of the same priority|
+{skip}\
+|`cancel=staging`|automatically cancels the current staging when this PR becomes ready|
+|`check`|fetches or refreshes PR metadata, resets mergebot state|
+
+Note: this help text is dynamic and will change with the state of the PR.\
+"""
+AUTHOR = """\
+Currently available commands for @{user}:
+
+|command||
+|-|-|
+|`help`|displays this help|
+|`fw=no`|does not forward-port this PR|
+|`up to <branch>`|only ports this PR forward to the specified branch (included)|
+|`check`|fetches or refreshes PR metadata, resets mergebot state|
+
+Note: this help text is dynamic and will change with the state of the PR.\
+"""
+RANDO = """\
+Currently available commands for @{user}:
+
+|command||
+|-|-|
+|`help`|displays this help|
+
+Note: this help text is dynamic and will change with the state of the PR.\
+"""
diff --git a/runbot_merge/tests/test_project_toggles.py b/runbot_merge/tests/test_project_toggles.py
new file mode 100644
index 00000000..db838df6
--- /dev/null
+++ b/runbot_merge/tests/test_project_toggles.py
@@ -0,0 +1,128 @@
+import datetime
+import functools
+from itertools import repeat
+
+import pytest
+
+from utils import Commit, to_pr, ensure_one
+
+
+def test_disable_staging(env, project, repo, config):
+    """In order to avoid issues of cron locking, as well as not disabling staging
+    for every project when trying to freeze just one of them (cough cough), a
+    toggle is available on the project to skip staging for it.
+    """
+    with repo:
+        [m] = repo.make_commits(None, Commit("m", tree={"a": "1"}), ref="heads/master")
+
+        [c] = repo.make_commits(m, Commit("c", tree={"a": "2"}), ref="heads/other")
+        pr = repo.make_pr(title="whatever", target="master", head="other")
+        pr.post_comment("hansen r+", config["role_reviewer"]['token'])
+        repo.post_status(c, "success")
+    env.run_crons()
+
+    pr_id = to_pr(env, pr)
+    staging_1 = pr_id.staging_id
+    assert staging_1.active
+
+    project.staging_enabled = False
+    staging_1.cancel("because")
+
+    env.run_crons()
+
+    assert staging_1.active is False
+    assert staging_1.state == "cancelled"
+    assert not pr_id.staging_id.active,\
+        "should not be re-staged, because staging has been disabled"
+
+@pytest.mark.parametrize('mode,cutoff,second', [
+    # default mode, the second staging is the first half of the first staging
+    ('default', 2, [0]),
+    # splits are right-biased (the midpoint is rounded down), so for odd
+    # staging sizes the first split is the smaller one
+    ('default', 3, [0]),
+    # if the split results in ((1, 2), 1), largest stages the second
+    ('largest', 3, [1, 2]),
+    # if the split results in ((1, 1), 2), largest stages the ready PRs
+    ('largest', 2, [2, 3]),
+    # even if it's a small minority, ready selects the ready PR(s)
+    ('ready', 3, [3]),
+    ('ready', 2, [2, 3]),
+])
+def test_staging_priority(env, project, repo, config, mode, cutoff, second):
+    """By default, unless a PR is prioritised as "alone", splits take priority
+    over new stagings.
+
+    *However* to try and maximise throughput in trying times, it's possible to
+    configure the project to prioritise either the largest staging (between split
+    and ready batches), or to just prioritise new stagings.
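+
+    Worked example for the parametrization above, with four PRs and
+    cutoff=3: the failed staging of three batches splits right-biased
+    into (1, 2) while the fourth PR is ready, so `default` restages
+    split [0], `largest` restages the bigger split [1, 2], and `ready`
+    stages the ready batch [3].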
+ """ + def select(prs, indices): + zero = env['runbot_merge.pull_requests'] + filtered = (p for i, p in enumerate(prs) if i in indices) + return functools.reduce(lambda a, b: a | b, filtered, zero) + + project.staging_priority = mode + # we need at least 3 PRs, two that we can split out, and one leftover + with repo: + [m] = repo.make_commits(None, Commit("m", tree={"ble": "1"}), ref="heads/master") + + repo.make_commits(m, Commit("c", tree={"1": "1"}), ref="heads/pr1") + pr1 = repo.make_pr(title="whatever", target="master", head="pr1") + + repo.make_commits(m, Commit("c", tree={"2": "2"}), ref="heads/pr2") + pr2 = repo.make_pr(title="whatever", target="master", head="pr2") + + repo.make_commits(m, Commit("c", tree={"3": "3"}), ref="heads/pr3") + pr3 = repo.make_pr(title="whatever", target="master", head="pr3") + + repo.make_commits(m, Commit("c", tree={"4": "4"}), ref="heads/pr4") + pr4 = repo.make_pr(title="whatever", target="master", head="pr4") + + prs = [pr1, pr2, pr3, pr4] + pr_ids = functools.reduce( + lambda a, b: a | b, + map(to_pr, repeat(env), prs) + ) + # ready the PRs for the initial staging (to split) + pre_cutoff = pr_ids[:cutoff] + with repo: + for pr, pr_id in zip(prs[:cutoff], pre_cutoff): + pr.post_comment('hansen r+', config['role_reviewer']['token']) + repo.post_status(pr_id.head, 'success') + env.run_crons() + # check they staged as expected + assert all(p.staging_id for p in pre_cutoff) + staging = ensure_one(env['runbot_merge.stagings'].search([])) + ensure_one(pre_cutoff.staging_id) + + # ready the rest + with repo: + for pr, pr_id in zip(prs[cutoff:], pr_ids[cutoff:]): + pr.post_comment('hansen r+', config['role_reviewer']['token']) + repo.post_status(pr_id.head, 'success') + env.run_crons(None) + assert not pr_ids.filtered(lambda p: p.blocked) + + # trigger a split + with repo: + repo.post_status('staging.master', 'failure') + + # specifically delay creation of new staging to observe the failed + # staging's state and the splits + model, cron_id = env['ir.model.data'].check_object_reference('runbot_merge', 'staging_cron') + staging_cron = env[model].browse([cron_id]) + staging_cron.active = False + + env.run_crons(None) + assert not staging.active + assert not env['runbot_merge.stagings'].search([]).active + assert env['runbot_merge.split'].search_count([]) == 2 + + staging_cron.active = True + # manually trigger that cron, as having the cron disabled prevented the creation of the triggers entirely + env.run_crons('runbot_merge.staging_cron') + + # check that st.pr_ids are the PRs we expect + st = env['runbot_merge.stagings'].search([]) + assert st.pr_ids == select(pr_ids, second) diff --git a/runbot_merge/tests/test_provisioning.py b/runbot_merge/tests/test_provisioning.py index 6119bf17..7cadcd52 100644 --- a/runbot_merge/tests/test_provisioning.py +++ b/runbot_merge/tests/test_provisioning.py @@ -1,4 +1,3 @@ -import pytest import requests GEORGE = { @@ -15,10 +14,8 @@ def test_basic_provisioning(env, port): assert g.partner_id.name == GEORGE['name'] assert g.partner_id.github_login == GEORGE['github_login'] assert g.oauth_uid == GEORGE['sub'] - (model, g_id) = env['ir.model.data']\ - .check_object_reference('base', 'group_user') - assert model == 'res.groups' - assert g.groups_id.id == g_id, "check that users were provisioned as internal (not portal)" + internal = env.ref('base.group_user') + assert (g.groups_id & internal) == internal, "check that users were provisioned as internal (not portal)" # repeated provisioning should be a no-op r = 
provision_user(port, [GEORGE]) @@ -32,24 +29,13 @@ def test_basic_provisioning(env, port): r = provision_user(port, [dict(GEORGE, name="x", github_login="y", sub="42")]) assert r == [0, 1] - # can't fail anymore because github_login now used to look up the existing - # user - # with pytest.raises(Exception): - # provision_user(port, [{ - # 'name': "other@example.org", - # 'email': "x", - # 'github_login': "y", - # 'sub': "42" - # }]) - r = provision_user(port, [dict(GEORGE, active=False)]) assert r == [0, 1] assert not env['res.users'].search([('login', '=', GEORGE['email'])]) assert env['res.partner'].search([('email', '=', GEORGE['email'])]) def test_upgrade_partner(env, port): - # If a partner exists for a github login (and / or email?) it can be - # upgraded by creating a user for it + # matching partner with an email but no github login p = env['res.partner'].create({ 'name': GEORGE['name'], 'email': GEORGE['email'], @@ -66,6 +52,7 @@ def test_upgrade_partner(env, port): p.user_ids.unlink() p.unlink() + # matching partner with a github login but no email p = env['res.partner'].create({ 'name': GEORGE['name'], 'github_login': GEORGE['github_login'], @@ -79,8 +66,47 @@ def test_upgrade_partner(env, port): 'email': GEORGE['email'], }] - p.user_ids.unlink() - p.unlink() + # matching partner with a deactivated user + p.user_ids.active = False + r = provision_user(port, [GEORGE]) + assert r == [0, 1] + assert len(p.user_ids) == 1, "provisioning should re-enable user" + assert p.user_ids.active + + # matching deactivated partner (with a deactivated user) + p.user_ids.active = False + p.active = False + r = provision_user(port, [GEORGE]) + assert r == [0, 1] + assert p.active, "provisioning should re-enable partner" + assert p.user_ids.active + +def test_duplicates(env, port): + """In case of duplicate data, the handler should probably not blow up, but + instead log a warning (so the data gets fixed eventually) and skip + """ + # dupe 1: old oauth signup account & github interaction account, provisioning + # prioritises the github account & tries to create a user for it, which + # fails because the signup account has the same oauth uid (probably) + env['res.partner'].create({'name': 'foo', 'github_login': 'foo'}) + env['res.users'].create({'login': 'foo@example.com', 'name': 'foo', 'email': 'foo@example.com', 'oauth_provider_id': 1, 'oauth_uid': '42'}) + assert provision_user(port, [{ + 'name': "foo", + 'email': 'foo@example.com', + 'github_login': 'foo', + 'sub': '42' + }]) == [0, 0] + + # dupe 2: old non-oauth signup account & github interaction account, same + # as previous except it breaks on the login instead of the oauth_uid + env['res.partner'].create({'name': 'bar', 'github_login': 'bar'}) + env['res.users'].create({'login': 'bar@example.com', 'name': 'bar', 'email': 'bar@example.com'}) + assert provision_user(port, [{ + 'name': "bar", + 'email': 'bar@example.com', + 'github_login': 'bar', + 'sub': '43' + }]) == [0, 0] def test_no_email(env, port): """ Provisioning system should ignore email-less entries @@ -88,6 +114,81 @@ def test_no_email(env, port): r = provision_user(port, [{**GEORGE, 'email': None}]) assert r == [0, 0] +def test_casing(env, port): + p = env['res.partner'].create({ + 'name': 'Bob', + 'github_login': "Bob", + }) + assert not p.user_ids + assert provision_user(port, [{ + 'name': "Bob Thebuilder", + 'github_login': "bob", + 'email': 'bob@example.org', + 'sub': '5473634', + }]) == [1, 0] + + assert p.user_ids.name == 'Bob Thebuilder' + assert p.user_ids.email == 
'bob@example.org'
+    assert p.user_ids.oauth_uid == '5473634'
+    # should be written on the partner through the user
+    assert p.name == 'Bob Thebuilder'
+    assert p.email == 'bob@example.org'
+    assert p.github_login == 'bob'
+
+def test_user_leaves_and_returns(env, port):
+    internal = env.ref('base.group_user')
+    portal = env.ref('base.group_portal')
+    categories = internal | portal | env.ref('base.group_public')
+
+    assert provision_user(port, [{
+        "name": "Bamien Douvy",
+        "github_login": "DouvyB",
+        "email": "bado@example.org",
+        "sub": "123456",
+    }]) == [1, 0]
+    p = env['res.partner'].search([('github_login', '=', "DouvyB")])
+    assert (p.user_ids.groups_id & categories) == internal
+
+    # bye bye 👋
+    requests.post(f'http://localhost:{port}/runbot_merge/remove_reviewers', json={
+        'jsonrpc': '2.0',
+        'id': None,
+        'method': 'call',
+        'params': {'github_logins': ['douvyb']},
+    })
+    assert (p.user_ids.groups_id & categories) == portal
+    assert p.email is False
+
+    # he's back ❤️
+    assert provision_user(port, [{
+        "name": "Bamien Douvy",
+        "github_login": "DouvyB",
+        "email": "bado@example.org",
+        "sub": "123456",
+    }]) == [0, 1]
+    assert (p.user_ids.groups_id & categories) == internal
+    assert p.email == 'bado@example.org'
+
+def test_bulk_ops(env, port):
+    a, b = env['res.partner'].create([{
+        'name': "Bob",
+        'email': "bob@example.org",
+        'active': False,
+    }, {
+        'name': "Coc",
+        'email': "coc@example.org",
+        'active': False,
+    }])
+    assert a.active is b.active is False
+
+    assert provision_user(port, [
+        {'email': 'bob@example.org', 'github_login': 'xyz'},
+        {'email': 'coc@example.org', 'github_login': 'abc'},
+    ]) == [2, 0]
+    assert a.user_ids
+    assert b.user_ids
+    assert a.active is b.active is True
+
 def provision_user(port, users):
     r = requests.post(f'http://localhost:{port}/runbot_merge/provision', json={
         'jsonrpc': '2.0',
@@ -97,6 +198,6 @@ def provision_user(port, users):
     })
     r.raise_for_status()
     json = r.json()
-    assert 'error' not in json
+    assert 'error' not in json, json['error']['data']['debug']
     return json['result']

diff --git a/runbot_merge/tests/test_staging.py b/runbot_merge/tests/test_staging.py
new file mode 100644
index 00000000..c1aeeeeb
--- /dev/null
+++ b/runbot_merge/tests/test_staging.py
@@ -0,0 +1,28 @@
+from utils import Commit, to_pr
+
+
+def test_staging_disabled_branch(env, project, repo, config):
+    """Check that it's possible to disable staging on a specific branch
+    """
+    project.branch_ids = [(0, 0, {
+        'name': 'other',
+        'staging_enabled': False,
+    })]
+    with repo:
+        [master_commit] = repo.make_commits(None, Commit("master", tree={'a': '1'}), ref="heads/master")
+        [c1] = repo.make_commits(master_commit, Commit("thing", tree={'a': '2'}), ref='heads/master-thing')
+        master_pr = repo.make_pr(title="whatever", target="master", head="master-thing")
+        master_pr.post_comment("hansen r+", config['role_reviewer']['token'])
+        repo.post_status(c1, 'success')
+
+        [other_commit] = repo.make_commits(None, Commit("other", tree={'b': '1'}), ref='heads/other')
+        [c2] = repo.make_commits(other_commit, Commit("thing", tree={'b': '2'}), ref='heads/other-thing')
+        other_pr = repo.make_pr(title="whatever", target="other", head="other-thing")
+        other_pr.post_comment("hansen r+", config['role_reviewer']['token'])
+        repo.post_status(c2, 'success')
+    env.run_crons()
+
+    assert to_pr(env, master_pr).staging_id, \
+        "master is allowed to stage, should be staged"
+    assert not to_pr(env, other_pr).staging_id, \
+        "other is *not* allowed to stage, should not be staged"
diff --git 
a/runbot_merge/tests/test_status_overrides.py b/runbot_merge/tests/test_status_overrides.py index 9ffe7604..0d13ac61 100644 --- a/runbot_merge/tests/test_status_overrides.py +++ b/runbot_merge/tests/test_status_overrides.py @@ -50,6 +50,7 @@ def test_basic(env, project, make_repo, users, setreviewers, config): 'status_ids': [(0, 0, {'context': 'l/int'})] }) setreviewers(*project.repo_ids) + env['runbot_merge.events_sources'].create({'repository': repo.name}) # "other" can override the lint env['res.partner'].create({ 'name': config['role_other'].get('name', 'Other'), @@ -89,7 +90,7 @@ def test_basic(env, project, make_repo, users, setreviewers, config): (users['reviewer'], 'hansen r+'), seen(env, pr, users), (users['reviewer'], 'hansen override=l/int'), - (users['user'], "I'm sorry, @{}: you are not allowed to override this status.".format(users['reviewer'])), + (users['user'], "@{} you are not allowed to override 'l/int'.".format(users['reviewer'])), (users['other'], "hansen override=l/int"), ] assert pr_id.statuses == '{}' @@ -110,6 +111,7 @@ def test_multiple(env, project, make_repo, users, setreviewers, config): 'status_ids': [(0, 0, {'context': 'l/int'}), (0, 0, {'context': 'c/i'})] }) setreviewers(*project.repo_ids) + env['runbot_merge.events_sources'].create({'repository': repo.name}) # "other" can override the lints env['res.partner'].create({ 'name': config['role_other'].get('name', 'Other'), @@ -174,6 +176,7 @@ def test_no_repository(env, project, make_repo, users, setreviewers, config): 'status_ids': [(0, 0, {'context': 'l/int'})] }) setreviewers(*project.repo_ids) + env['runbot_merge.events_sources'].create({'repository': repo.name}) # "other" can override the lint env['res.partner'].create({ 'name': config['role_other'].get('name', 'Other'), diff --git a/runbot_merge/utils.py b/runbot_merge/utils.py index bd35d3db..0ba2cc49 100644 --- a/runbot_merge/utils.py +++ b/runbot_merge/utils.py @@ -3,7 +3,7 @@ import itertools import time -def shorten(text_ish, length): +def shorten(text_ish, length, cont='...'): """ If necessary, cuts-off the text or bytes input and appends ellipsis to signal the cutoff, such that the result is below the provided length (according to whatever "len" means on the text-ish so bytes or codepoints @@ -12,11 +12,10 @@ def shorten(text_ish, length): if len(text_ish or ()) <= length: return text_ish - cont = '...' 
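+    # e.g. (sketch): shorten('0123456789', 8) == '01234...'
+    #      while shorten('0123456789', 8, cont='~') == '0123456~'
+    # so the result stays within `length` whatever the continuation marker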
if isinstance(text_ish, bytes): cont = cont.encode('ascii') # whatever # add enough room for the ellipsis - return text_ish[:length-3] + cont + return text_ish[:length-len(cont)] + cont BACKOFF_DELAYS = (0.1, 0.2, 0.4, 0.8, 1.6) def backoff(func=None, *, delays=BACKOFF_DELAYS, exc=Exception): diff --git a/runbot_merge/views/batch.xml b/runbot_merge/views/batch.xml new file mode 100644 index 00000000..3579516c --- /dev/null +++ b/runbot_merge/views/batch.xml @@ -0,0 +1,98 @@ +<odoo> + <record id="runbot_merge_action_batches" model="ir.actions.act_window"> + <field name="name">Batches</field> + <field name="res_model">runbot_merge.batch</field> + <field name="view_mode">tree,form</field> + </record> + + <record id="runbot_merge_batch_search" model="ir.ui.view"> + <field name="name">batches search</field> + <field name="model">runbot_merge.batch</field> + <field name="arch" type="xml"> + <search> + <filter name="all" domain="['|', ('active', '=', True), ('active', '=', False)]"/> + <filter name="inactive" domain="[('active', '=', False)]"/> + + <field name="name"/> + <field name="target"/> + <field name="id"/> + </search> + </field> + </record> + + <record id="runbot_merge_batch_tree" model="ir.ui.view"> + <field name="name">batches list</field> + <field name="model">runbot_merge.batch</field> + <field name="arch" type="xml"> + <tree decoration-muted="not active"> + <field name="id"/> + <field name="name"/> + <field name="target"/> + <field name="prs" widget="many2many_tags"/> + <field name="blocked"/> + <field name="active" invisible="1"/> + </tree> + </field> + </record> + + <record id="runbot_merge_batch_form" model="ir.ui.view"> + <field name="name">Batch form</field> + <field name="model">runbot_merge.batch</field> + <field name="arch" type="xml"> + <form> + <sheet> + <div class="oe_title"><h1><field name="name"/></h1></div> + <group> + <group> + <field name="target"/> + <field name="merge_date"/> + <field name="priority" invisible="merge_date"/> + <field name="skipchecks" invisible="merge_date"/> + <field name="cancel_staging" invisible="merge_date"/> + <field name="fw_policy"/> + </group> + <group> + <field name="blocked"/> + </group> + </group> + <group string="Pull Requests"> + <group colspan="4"> + <field name="all_prs" nolabel="1" readonly="1"> + <tree> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open PR"/> + <field name="display_name"/> + <field name="repository"/> + <field name="state"/> + </tree> + </field> + </group> + </group> + <group string="Genealogy"> + <group colspan="4"> + <field name="genealogy_ids" nolabel="1" readonly="1"> + <tree decoration-muted="id == parent.id"> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open batch"/> + <field name="name"/> + <field name="target"/> + <field name="all_prs" widget="many2many_tags"/> + </tree> + </field> + </group> + </group> + <group string="Stagings"> + <group colspan="4"> + <field name="staging_ids" nolabel="1" readonly="1"> + <tree> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open staging"/> + <field name="staged_at"/> + <field name="state"/> + <field name="reason"/> + </tree> + </field> + </group> + </group> + </sheet> + </form> + </field> + </record> +</odoo> diff --git a/runbot_merge/views/configuration.xml b/runbot_merge/views/configuration.xml index 70e8d710..0273a53a 100644 --- a/runbot_merge/views/configuration.xml +++ b/runbot_merge/views/configuration.xml @@ -33,11 +33,59 @@ </field> </record> - <menuitem 
name="Configuration" id="menu_configuration" parent="runbot_merge_menu"/> + <record id="action_feedback" model="ir.actions.act_window"> + <field name="name">Feedback Templates tree</field> + <field name="res_model">runbot_merge.pull_requests.feedback.template</field> + </record> + <record id="tree_feedback" model="ir.ui.view"> + <field name="name">Feedback Templates</field> + <field name="model">runbot_merge.pull_requests.feedback.template</field> + <field name="arch" type="xml"> + <tree> + <field name="template"/> + <field name="help"/> + </tree> + </field> + </record> + <record id="form_feedback" model="ir.ui.view"> + <field name="name">Feedback Templates form</field> + <field name="model">runbot_merge.pull_requests.feedback.template</field> + <field name="arch" type="xml"> + <form> + <sheet> + <field name="help"/> + <field name="template"/> + </sheet> + <div class="oe_chatter"> + <field name="message_ids"/> + </div> + </form> + </field> + </record> + + <record id="action_events_sources" model="ir.actions.act_window"> + <field name="name">Events Sources</field> + <field name="res_model">runbot_merge.events_sources</field> + </record> + <record id="tree_events_sources" model="ir.ui.view"> + <field name="name">Events Sources List</field> + <field name="model">runbot_merge.events_sources</field> + <field name="arch" type="xml"> + <tree editable="bottom"> + <field name="repository"/> + <field name="secret"/> + </tree> + </field> + </record> + + <menuitem name="Configuration" id="menu_configuration" parent="runbot_merge_menu"> <menuitem name="CI Overrides" id="menu_configuration_overrides" - parent="menu_configuration" action="action_overrides"/> <menuitem name="Review Rights" id="menu_configuration_review" - parent="menu_configuration" action="action_review"/> + <menuitem name="Feedback Templates" id="menu_configuration_feedback" + action="action_feedback"/> + <menuitem name="Events Sources" id="menu_configuration_events_sources" + action="action_events_sources"/> + </menuitem> </odoo> diff --git a/runbot_merge/views/mergebot.xml b/runbot_merge/views/mergebot.xml index 9cc3168b..3f1b9a88 100644 --- a/runbot_merge/views/mergebot.xml +++ b/runbot_merge/views/mergebot.xml @@ -20,6 +20,7 @@ <separator string="Required Statuses"/> <field name="status_ids"> <tree editable="bottom"> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open status"/> <field name="context"/> <field name="branch_filter"/> <field name="prs"/> @@ -30,6 +31,41 @@ </form> </field> </record> + <record id="runbot_merge_branch_form" model="ir.ui.view"> + <field name="name">Branch Form</field> + <field name="model">runbot_merge.branch</field> + <field name="arch" type="xml"> + <form> + <sheet> + <div class="oe_title"> + <h1><field name="name"/></h1> + </div> + <group> + <group> + <field name="project_id" readonly="1"/> + <field name="sequence" readonly="1"/> + </group> + <group> + <field name="active"/> + <field name="staging_enabled"/> + </group> + </group> + <separator string="Stagings"/> + <group> + <field name="active_staging_id"/> + </group> + <field name="staging_ids" nolabel="1" readonly="1"> + <tree default_order="staged_at desc"> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open staging"/> + <field name="id"/> + <field name="staged_at"/> + <field name="state"/> + </tree> + </field> + </sheet> + </form> + </field> + </record> <record id="runbot_merge_action_projects" model="ir.actions.act_window"> <field name="name">Projects</field> @@ -52,9 +88,9 @@ 
name="open" string="Open" domain="[('state', 'not in', ['merged', 'closed'])]" /> + <field name="label"/> <field name="number"/> <field name="author"/> - <field name="label"/> <field name="target"/> <field name="repository"/> <field name="state"/> @@ -87,50 +123,156 @@ <field name="model">runbot_merge.pull_requests</field> <field name="arch" type="xml"> <form> - <header/> + <div class="o_form_statusbar"> + <span class="o_statusbar_buttons"> + <button type="object" name="button_split" string="Split Off"/> + <field name="github_url" widget="url" class="btn btn-secondary" text="Github"/> + <field name="url" widget="url" class="btn btn-secondary" text="Frontend"/> + </span> + </div> <sheet> + <field name="project" invisible="1"/> + <field name="target_sequence" invisible="1"/> <div class="oe_title"> <h1> <field name="repository"/>#<field name="number"/> </h1> + <h2> + <field name="state"/> + <span invisible="state == 'merged' or not blocked"> + (blocked: <field name="blocked"/>) + </span> + <span invisible="state != 'merged'"> + (<field name="merge_date"/>) + </span> + </h2> </div> - <group> + <!-- main PR metadata --> + <group name="metadata"> <group> + <field name="batch_id"/> <field name="target"/> - <field name="state"/> - <field name="author"/> </group> <group> - <field name="label"/> - <field name="priority"/> - <field name="squash"/> - </group> - </group> - <group> - <group colspan="4"> + <field name="author"/> <field name="head"/> - <field name="statuses"/> - </group> - <group colspan="4"> - <field name="overrides"/> </group> </group> - <group> - <group colspan="4" string="Message"> + <notebook> + <page name="state" string="State"> + <group> + <group> + <field name="reviewed_by"/> + <field name="closed"/> + <field name="error"/> + </group> + <group> + <field name="status"/> + <details colspan="4"> + <summary>Commit Statuses</summary> + + <field name="statuses"/> + </details> + <details colspan="4"> + <summary>Overrides</summary> + <field name="overrides"/> + </details> + </group> + </group> + <group> + <group colspan="4"> + <field name="blocked"/> + </group> + </group> + </page> + <page name="configuration" string="Configuration"> + <group> + <group> + <field name="merge_method"/> + <field name="squash"/> + <field name="draft"/> + </group> + <group> + <field name="priority"/> + <field name="skipchecks" widget="boolean_toggle"/> + <field name="cancel_staging" widget="boolean_toggle"/> + </group> + </group> + <group string="Delegates"> + <group colspan="4"> + <field name="delegates" nolabel="1"> + <tree> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open partner"/> + <field name="name"/> + <field name="github_login"/> + </tree> + </field> + </group> + </group> + </page> + <page name="stagings" string="Staging History"> + <group> + <group colspan="4"> + <field name="staging_ids" nolabel="1" readonly="1"> + <tree> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open staging"/> + <field name="staged_at"/> + <field name="state"/> + <field name="reason"/> + </tree> + </field> + </group> + </group> + </page> + <page name="porting" string="Forward-Porting"> + <group> + <group> + <field name="limit_id" domain="[('project_id', '=', project), ('sequence', '<=', target_sequence)]"/> + <field string="Original PR" name="source_id"/> + <field name="parent_id"/> + <field + invisible="source_id and not parent_id" + string="Detached because" name="detach_reason" readonly="1"/> + </group> + </group> + <group> + <group colspan="4"> + 
<field name="forwardport_ids" nolabel="1" readonly="True"> + <tree> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open PR"/> + <field name="target" string="Branch"/> + <field name="number"/> + </tree> + </field> + </group> + </group> + </page> + </notebook> + <!-- influencers --> + <group string="Message"> + <group colspan="4"> <field name="message" nolabel="1"/> </group> </group> - <group> - <group colspan="4" string="Delegates"> - <field name="delegates" nolabel="1"> - <tree> - <field name="name"/> - <field name="github_login"/> - </tree> - </field> - </group> - </group> </sheet> + <div class="oe_chatter"> + <field name="message_follower_ids" widget="mail_followers"/> + <field name="message_ids" widget="mail_thread"/> + </div> + </form> + </field> + </record> + + <record id="runbot_merge_pull_requests_split_off_form" model="ir.ui.view"> + <field name="name">Split Off Form</field> + <field name="model">runbot_merge.pull_requests.split_off</field> + <field name="arch" type="xml"> + <form> + <field name="new_label" colspan="4"/> + <footer> + <button type="object" name="button_apply" string="Apply" class="btn btn-primary"/> + <button special="cancel" string="Cancel" class="btn btn-secondary"/> + </footer> </form> </field> </record> @@ -175,7 +317,7 @@ <field name="active" invisible="1"/> <header> <button type="object" name="action_cancel" string="Cancel" class="oe_highlight" - attrs="{'invisible': [('active', '=', False)]}" + invisible="not active" /> </header> <sheet> @@ -187,21 +329,44 @@ </group> <group> <field name="staged_at"/> + <field string="Staging Duration (seconds)" + name="staging_duration" widget="integer"/> </group> </group> - <group string="Heads"> - <field name="head_ids" colspan="4" nolabel="1"> + <group> + <group string="Heads"> + <field name="head_ids" colspan="2" nolabel="1" readonly="1"> + <tree> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open head"/> + <field name="sha"/> + </tree> + </field> + </group> + <group string="Commits"> + <field name="commit_ids" colspan="2" nolabel="1" readonly="1"> + <tree> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open commit"/> + <field name="sha"/> + </tree> + </field> + </group> + </group> + <group string="Batches"> + <field name="batch_ids" colspan="4" nolabel="1" readonly="1"> <tree> - <field name="sha"/> - <field name="statuses"/> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open batch"/> + <field name="name"/> + <field name="prs" widget="many2many_tags"/> </tree> </field> </group> - <group string="Batches"> - <field name="batch_ids" colspan="4" nolabel="1"> + <group string="PRs"> + <field name="pr_ids" colspan="4" nolabel="1" readonly="1"> <tree> - <field name="prs" widget="many2many_tags" - options="{'no_quick_create': True}"/> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open pr"/> + <field name="display_name"/> + <field name="github_url" widget="url"/> + <field name="url" widget="url"/> </tree> </field> </group> @@ -221,22 +386,62 @@ <field name="arch" type="xml"> <tree> <field name="sha"/> - <field name="statuses"/> </tree> </field> </record> + <record id="runbot_merge_commits_form" model="ir.ui.view"> + <field name="name">commits form</field> + <field name="model">runbot_merge.commit</field> + <field name="arch" type="xml"> + <form> + <sheet> + <div class="oe_title"> + <h1><field name="sha"/></h1> + </div> + <field name="statuses" 
widget="json"/> + <separator string="Pull Requests"/> + <field name="pull_requests" nolabel="1"> + <tree> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open pr"/> + <field name="display_name"/> + <field name="state"/> + </tree> + </field> + <separator string="Stagings (commits)"/> + <field name="commit_ids" nolabel="1" readonly="1"> + <tree> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open commit"/> + <field name="target"/> + <field name="id"/> + <field name="staged_at"/> + <field name="state"/> + </tree> + </field> + <separator string="Stagings (heads)"/> + <field name="head_ids" nolabel="1" readonly="1"> + <tree> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open head"/> + <field name="target"/> + <field name="id"/> + <field name="staged_at"/> + <field name="state"/> + </tree> + </field> + </sheet> + </form> + </field> + </record> - <menuitem name="Mergebot" id="runbot_merge_menu"/> - <menuitem name="Projects" id="runbot_merge_menu_project" - parent="runbot_merge_menu" - action="runbot_merge_action_projects"/> + <menuitem name="Mergebot" id="runbot_merge_menu"> <menuitem name="Pull Requests" id="runbot_merge_menu_prs" - parent="runbot_merge_menu" - action="runbot_merge_action_prs"/> + action="runbot_merge_action_prs" sequence="5"/> <menuitem name="Stagings" id="runbot_merge_menu_stagings" - parent="runbot_merge_menu" - action="runbot_merge_action_stagings"/> + action="runbot_merge_action_stagings" sequence="8"/> + <menuitem name="Projects" id="runbot_merge_menu_project" + action="runbot_merge_action_projects"/> + <menuitem name="Batches" id="runbot_merge_menu_batches" + action="runbot_merge_action_batches"/> <menuitem name="Commits" id="runbot_merge_menu_commits" - parent="runbot_merge_menu" action="runbot_merge_action_commits"/> + </menuitem> </odoo> diff --git a/runbot_merge/views/res_partner.xml b/runbot_merge/views/res_partner.xml index fde5f7e1..262265c3 100644 --- a/runbot_merge/views/res_partner.xml +++ b/runbot_merge/views/res_partner.xml @@ -1,5 +1,5 @@ <odoo> - <record id="runbot_merge_tree_partner" model="ir.ui.view"> + <record id="runbot_merge_search_partner" model="ir.ui.view"> <field name="name">Improve search on partners</field> <field name="model">res.partner</field> <field name="inherit_id" ref="base.view_res_partner_filter"/> @@ -25,6 +25,7 @@ <field name="display_name" string="Name"/> <field name="github_login"/> <field name="review_rights" widget="many2many_tags"/> + <field name="user_id" invisible="1"/> </tree> </xpath> </field> @@ -35,20 +36,23 @@ <field name="inherit_id" ref="base.view_partner_form"/> <field name="arch" type="xml"> <xpath expr="//sheet" position="before"> + <field name="github_login" invisible="1"/> + <field name="review_rights" invisible="1"/> <header> <button type="object" name="fetch_github_email" string="Fetch Github Email" class="oe_highlight" - attrs="{'invisible': ['|', ('email', '!=', False), ('github_login', '=', False)]}" + invisible="email or not github_login" /> </header> <div class="alert alert-warning" role="alert" - attrs="{'invisible': ['|', ('email', '!=', False), ('review_rights', '=', [])]}"> + invisible="email or not review_rights"> Reviewers must have an email address set! Without an email configured, reviews will be ignored. 
</div> </xpath> <xpath expr="//notebook" position="inside"> <page string="Mergebot" groups="runbot_merge.group_admin"> + <field name="override_sensitive" invisible="1"/> <group> <group> <field name="github_login"/> @@ -56,7 +60,12 @@ </group> <group> <group colspan="4" string="Review Rights"> - <field name="review_rights" nolabel="1"> + <div colspan="4" class="alert alert-warning" role="alert" invisible="not review_rights"> + Review access requires successfully following + the Code Review (QDP) and Security (DLE) + trainings. Please check before giving r+ access. + </div> + <field colspan="4" name="review_rights" nolabel="1"> <tree string="Review ACLs" editable="bottom"> <field name="repository_id"/> <field name="review"/> @@ -65,7 +74,12 @@ </field> </group> <group colspan="4"> - <field name="override_rights" widget="many2many_tags"/> + <div colspan="4" class="alert alert-danger" role="alert" invisible="not override_sensitive"> + Security Override <b>REQUIRES</b> successfully + following the Security training. Please ask DLE + before granting access. + </div> + <field colspan="4" name="override_rights" widget="many2many_tags"/> </group> </group> <group> diff --git a/runbot_merge/views/runbot_merge_project.xml b/runbot_merge/views/runbot_merge_project.xml index 2f670ed3..a6edf629 100644 --- a/runbot_merge/views/runbot_merge_project.xml +++ b/runbot_merge/views/runbot_merge_project.xml @@ -8,10 +8,10 @@ <header> <button type="object" name="action_prepare_freeze" string="Freeze" - attrs="{'invisible': [('freeze_id', '!=', False)]}"/> + invisible="freeze_id"/> <button type="object" name="action_prepare_freeze" string="View Freeze" class="oe_highlight" - attrs="{'invisible': [('freeze_id', '=', False)]}"/> + invisible="not freeze_id"/> </header> <sheet> <div class="oe_title"> @@ -25,9 +25,21 @@ <group> <group> <field name="github_token"/> - <field name="secret"/> + <field name="github_name" readonly="0" + help="Identity when creating new commits, defaults to github name, falls back to login."/> + <field name="github_email" readonly="0" + help="Identity when creating new commits, defaults to public email, falls back to primary email."/> + <span invisible="not (staging_statuses and staging_rpc)" class="alert alert-warning" role="alert"> + Avoid overlaps between GH and RPC as the older + GH statuses may overwrite more recent RPC statuses. 
+ </span> + <field name="staging_statuses" string="Validate via GH statuses"/> + <field name="staging_rpc" string="Validate via direct RPC"/> </group> <group> + <field name="staging_enabled" widget="boolean_toggle"/> + <field name="staging_priority"/> + <field name="uniquifier"/> <field name="ci_timeout"/> <field name="batch_limit"/> </group> @@ -46,6 +58,7 @@ <field name="repo_ids"> <tree> <field name="sequence" widget="handle"/> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open repo"/> <field name="name"/> <field name="branch_filter"/> <field name="status_ids" widget="many2many_tags"/> @@ -55,8 +68,10 @@ <field name="branch_ids"> <tree editable="bottom" decoration-muted="not active"> <field name="sequence" widget="handle" /> + <button type="object" name="get_formview_action" icon="fa-external-link" title="open branch"/> <field name="name"/> - <field name="active"/> + <field name="active" widget="boolean_toggle"/> + <field name="staging_enabled" widget="boolean_toggle"/> </tree> </field> </sheet> diff --git a/runbot_merge/views/templates.xml b/runbot_merge/views/templates.xml index 73e8e883..2dff930f 100644 --- a/runbot_merge/views/templates.xml +++ b/runbot_merge/views/templates.xml @@ -7,11 +7,19 @@ <template id="link-pr" name="create a link to `pr`"> <t t-set="title"> <t t-if="pr.repository.group_id &lt;= env.user.groups_id"> - <t t-esc="pr.message.split('\n')[0]"/> + <t t-out="pr.message.split('\n', 1)[0]"/> </t> </t> + <t t-set="title"> + <t t-if="title.strip() and pr.blocked"> + <t t-out="title.strip()"/>: <t t-out="pr.blocked"/> + </t> + <t t-else=""> + <t t-out="pr.blocked or title.strip()"/> + </t> + </t> <a t-attf-href="https://github.com/{{ pr.repository.name }}/pull/{{ pr.number }}" - t-att-title="pr.blocked or title.strip()" + t-att-title="title" t-att-target="target or None" t-att-class="classes or None" ><t t-esc="pr.display_name"/></a> @@ -24,7 +32,7 @@ data-toggle="dropdown" aria-haspopup="true" aria-expanded="true" - t-attf-title="Staged at {{staging.staged_at}}Z" + t-attf-title="Staged at {{staging.staged_at}}Z for {{round(staging.staging_duration)}}s" > <t t-out="0"/> <span class="caret"></span> @@ -72,7 +80,7 @@ <t t-call="website.layout"> <div id="wrap"><div class="container-fluid"> <t t-call="runbot_merge.alerts"/> - <section t-foreach="projects.with_context(active_test=False)" t-as="project" class="row"> + <section t-foreach="projects" t-as="project" class="row"> <h1 class="col-md-12"><t t-esc="project.name"/></h1> <div class="col-md-12"> key: @@ -154,7 +162,7 @@ <template id="stagings" name="mergebot branch stagings"> <t t-set="repo_statuses" t-value="branch.project_id.repo_ids.having_branch(branch).status_ids"/> <ul class="list-unstyled stagings"> - <t t-foreach="branch.env['runbot_merge.stagings'].search([('target', '=', branch.id)], order='staged_at desc', limit=6)" t-as="staging"> + <t t-foreach="stagings_map[branch]" t-as="staging"> <t t-set="success" t-value="staging.state == 'success'"/> <t t-set="failure" t-value="staging.state == 'failure'"/> <t t-set="pending" t-value="staging.active and (not staging.state or staging.state == 'pending')"/> @@ -187,6 +195,11 @@ </ul> <t t-call="runbot_merge.staging-statuses"> Staged <span t-field="staging.staged_at" t-options="{'widget': 'relative'}"/> + (duration <span t-field="staging.staging_duration" t-options="{ + 'widget': 'duration', + 'format': 'short', + 'round': 'minute' + }"/>) </t> </li> </t> @@ -199,6 +212,18 @@ <section class="row"> <h1 class="col-md-12"><t
t-esc="branch.project_id.name"/>: <t t-esc="branch.name"/></h1> </section> + <form method="get"> + <label for="until">Staged before:</label> + <input type="datetime-local" name="until" t-att-value="until"/> + (UTC) + <label for="state">State:</label> + <select name="state"> + <option t-att-selected="'selected' if not state else None"/> + <option t-att-selected="'selected' if state == 'success' else None" value="success">Success</option> + <option t-att-selected="'selected' if state == 'failure' else None" value="failure">Failure</option> + </select> + <button type="submit">Apply</button> + </form> <table> <t t-foreach="stagings" t-as="staging"> <t t-set="success" @@ -236,6 +261,11 @@ <t t-call="runbot_merge.staging-statuses"> <span t-field="staging.staged_at" t-options="{'format': 'yyyy-MM-dd\'T\'HH:mm:ssZ'}"/> + in <span t-field="staging.staging_duration" t-options="{ + 'widget': 'duration', + 'format': 'narrow', + 'round': 'minute' + }"/> </t> </th> <td> @@ -270,7 +300,7 @@ </t> </table> <t t-if="next"> - <a t-attf-href="/runbot_merge/{{branch.id}}?until={{next}}"> + <a t-attf-href="/runbot_merge/{{branch.id}}?until={{next}}&amp;state={{state}}"> Next > </a> </t> @@ -299,6 +329,15 @@ <t t-if="merged_head"> at <a t-attf-href="https://github.com/{{pr.repository.name}}/commit/{{merged_head}}"><t t-esc="merged_head"/></a> </t> + <p>Statuses:</p> + <ul> + <t t-foreach="pr.repository.status_ids._for_pr(pr)" t-as="ci"> + <t t-set="st" t-value="statuses.get(ci.context.strip())"/> + <li t-if="st"> + <a t-att-href="st.get('target_url') if st else None"><t t-esc="ci.context.strip()"/></a><t t-if="st and st.get('description')">: <t t-esc="st['description']"/></t> + </li> + </t> + </ul> <t t-set="linked_prs" t-value="pr._linked_prs"/> <div t-if="linked_prs"> @@ -319,7 +358,7 @@ <template id="view_pull_request_info_error"> <div class="alert alert-danger"> Error: - <span t-esc="pr.with_context(active_test=False).batch_ids[-1:].staging_id.reason"> + <span t-esc="pr.with_context(active_test=False).batch_id.staging_ids[-1:].reason"> Unable to stage PR </span> </div> @@ -391,7 +430,7 @@ </a> <a t-attf-href="/web#view_type=form&amp;model=runbot_merge.pull_requests&amp;id={{pr.id}}" class="btn btn-sm btn-secondary align-top float-right" - groups="base.group_user">View in backend</a> + groups="runbot_merge.group_admin">View in backend</a> </h1> <h6>Created by <span t-field="pr.author.display_name"/></h6> <t t-set="tmpl"> @@ -400,17 +439,192 @@ <t t-else="">open</t> </t> <t t-call="runbot_merge.view_pull_request_info_{{tmpl.strip()}}"/> - <t t-set="target_cls" t-value="None if pr.target.active else 'text-muted bg-warning'"/> <dl class="runbot-merge-fields"> <dt>label</dt> <dd><span t-field="pr.label"/></dd> <dt>head</dt> <dd><a t-attf-href="{{pr.github_url}}/commits/{{pr.head}}"><span t-field="pr.head"/></a></dd> - <dt t-att-class="target_cls">target</dt> - <dd t-att-class="target_cls"><span t-field="pr.target"/></dd> </dl> - <p t-field="pr.message"/> + <t t-call="runbot_merge.dashboard-table"/> + <p t-field="pr.message_html"/> </div></div> </t> </template> + + <record id="dashboard-pre" model="ir.actions.server"> + <field name="name">Preparation for the preparation of the PR dashboard content</field> + <field name="state">code</field> + <field name="model_id" ref="base.model_ir_qweb"/> + <field name="code"><![CDATA[ +project = pr.repository.project_id +genealogy = pr.batch_id.genealogy_ids +repos = project.repo_ids & genealogy.all_prs.repository +targets = genealogy.all_prs.target +if not genealogy: + # if a PR is
closed, it may not have a batch to get a genealogy from, + # in which case it's just a sole soul drifting in the deep dark + branches = pr.target + repos = pr.repository +elif all(p.state in ('merged', 'closed') for p in genealogy[-1].all_prs): + branches = (project.branch_ids & targets)[::-1] +else: + # if the tip of the genealogy is not closed, extend to the furthest limit, + # keeping branches which are active or have an associated batch / PR + limit = min(genealogy.prs.limit_id, key=lambda b: (b.sequence, b.name), default=None) + limit_high = project.branch_ids.ids.index(limit.id) if limit else None + + limit = max(targets, key=lambda b: (b.sequence, b.name)) + limit_low = project.branch_ids.ids.index(limit.id) + + branches = project.branch_ids[limit_high:limit_low+1].filtered(lambda b: b.active or b in targets)[::-1] + +action = (project, repos, branches, genealogy) + ]]></field> + </record> + + <record id="dashboard-prep" model="ir.actions.server"> + <field name="name">Preparation of the PR dashboard content</field> + <field name="state">code</field> + <field name="model_id" ref="base.model_ir_qweb"/> + <field name="code"><![CDATA[ +batches = {} +for branch in [*branches, branches.browse(())]: + if genealogy: + prs_batch = genealogy.filtered(lambda b: b.target == branch).all_prs + if not (branch or prs_batch): + continue + else: + prs_batch = pr + for repo in repos: + prs = prs_batch.filtered(lambda p: p.repository == repo) + st = 0 + detached = False + pr_fmt = [] + for p in prs: + # accumulate per-PR flags: bit 2 = error, bit 1 = merged, bit 0 = blocked + st |= (bool(p.error) << 2 | (p.state == 'merged') << 1 | bool(p.blocked) << 0) + + done = p.state in ('closed', 'merged') + # this will hide the detachment signal when the PRs are merged/closed, cleaner but less correct? + detached = detached or bool(p.source_id and not p.parent_id and not done) + label = p.state + if p.blocked: + label = "%s, %s" % (label, p.blocked) + pr_fmt.append({ + 'pr': p, + 'number': p.number, + 'label': label, + 'closed': p.closed, + 'backend_url': "/web#view_type=form&model=runbot_merge.pull_requests&id=%d" % p.id, + 'github_url': p.github_url, + 'checked': done or p.status == 'success', + 'reviewed': done or bool(p.reviewed_by), + 'attached': done or p.parent_id or not p.source_id, + }) + state = None + # highest set flag wins: error (danger) > merged (success) > blocked (warning) + for i, s in zip(range(2, -1, -1), ['danger', 'success', 'warning']): + if st & (1 << i): + state = s + break + + batches[repo, branch] = { + 'active': pr in prs, + 'detached': detached, + 'state': state, + 'prs': pr_fmt, + 'pr_ids': prs, + } + +action = batches + ]]></field> + </record> + <template id="dashboard-table"> + <t t-set="pre" t-value="pr.env.ref('runbot_merge.dashboard-pre').sudo()._run_action_code_multi({'pr': pr})"/> + <t t-set="repos" t-value="pre[1]"/> + <t t-set="branches" t-value="pre[2]"/> + <t t-set="batches" t-value="env.ref('runbot_merge.dashboard-prep').sudo()._run_action_code_multi({ + 'pr': pr, + 'repos': repos, + 'branches': branches, + 'genealogy': pre[3], + })"/> + <div t-if="not pr.batch_id.target" class="alert alert-danger"> +<p>Inconsistent targets:</p> +<ul><li t-foreach="pr.batch_id.prs" t-as="p"> +<a t-att-href="p.url"><t t-out="p.display_name"/></a> has target '<t t-out="p.target.name"/>'</li></ul> + </div> + <table t-else="" class="table table-bordered table-sm"> + <colgroup> + <col/> + <col t-foreach="repos" t-as="repo" + t-att-class="'bg-info' if repo == pr.repository else None" + /> + </colgroup> + <thead> + <tr> + <th/> + <th t-foreach="repos" t-as="repo"> + <t t-out="repo.name"/> + </th> + </tr> + </thead> + <tbody> + <!-- + table-info
renders poorly here (possibly because the odoo styling is not loaded), so use bg-info instead + text-muted doesn't do anything either, so set some opacity + --> + <tr t-foreach="branches" t-as="branch" + t-att-title="None if branch.active else 'branch is disabled'" + t-attf-class="{{ + 'bg-info' if branch == pr.target else '' + }} {{ + 'inactive' if not branch.active else '' + }}"> + <td t-out="branch.name or ''"/> + <t t-foreach="repos" t-as="repo"> + <t t-set="ps" t-value="batches[repo, branch]"/> + <t t-set="stateclass" t-value="ps['state'] and 'table-'+ps['state']"/> + <t t-set="detached" t-value="ps['detached']"/> + <td t-if="ps['prs']" + t-att-title="'detached' if detached else None" + t-attf-class="{{ + 'table-active' if ps['active'] else '' + }} {{ + 'detached' if detached else '' + }} {{stateclass}}"> + <!-- + there should be only one PR per (repo, target) but + that's not always the case + --> + <span t-foreach="ps['prs']" t-as="p" + t-att-title="p['label']" + t-att-class="'closed' if p['closed'] else None"> + <a t-attf-href="/{{repo.name}}/pull/{{p['number']}}">#<t t-out="p['number']"/></a> + <a t-attf-class="fa fa-brands fa-github" + title="Open on Github" + t-att-href="p['github_url']" + /> + <a groups="runbot_merge.group_admin" + title="Open in Backend" + t-attf-class="fa fa-external-link" + t-att-href="p['backend_url']" + /> + <sup t-if="not p['checked']" class="text-danger">missing statuses</sup> + <sup t-if="not p['reviewed']" class="text-danger">missing r+</sup> + <sup t-if="not p['attached']" + t-attf-title="detached: {{p['pr'].detach_reason}}" + class="text-warning fa fa-unlink"/> + <sup t-if="p['pr'].staging_id" class="text-success"> + staged + </sup> + <sup t-elif="p['pr']._ready" class="text-success"> + ready + </sup> + </span> + </td> + <td t-else=""/> + </t> + </tr> + </tbody> + </table> + </template> </odoo>
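
Note on the `stagings` template change above: the inline `branch.env['runbot_merge.stagings'].search(...)` was replaced by a lookup in a precomputed `stagings_map`, so whichever controller renders this page must now supply that mapping. A minimal sketch of the expected preparation, assuming a standard Odoo http controller; only the domain, ordering, and limit come from the removed line, the surrounding names (`project`, `values`) are illustrative:

# hypothetical controller-side preparation for the reworked `stagings` template;
# reproduces the removed inline search: last 6 stagings per branch, newest first
stagings_map = {
    branch: env['runbot_merge.stagings'].search(
        [('target', '=', branch.id)],  # same domain as the old inline search
        order='staged_at desc',
        limit=6,
    )
    for branch in project.branch_ids
}
values = {'projects': projects, 'stagings_map': stagings_map}  # qweb rendering context

Precomputing the map keeps the per-branch query out of the template, which matches the general direction of this patch (cf. the `dashboard-pre`/`dashboard-prep` server actions doing the heavy lifting for `dashboard-table`).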