[MERGE] *: triggerify mergebot and forwardport crons
A few crons (e.g. database maintenance) remain on timers, but most of the work crons should be migrated to triggers. The purpose of this change is mostly to reduce log spam, as crons get triggered very frequently (most of them every minute) but with little to do. Secondary purposes are getting experience with cron triggers, and better sussing out dependencies between actions and crons / queues.

Fixes #797
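For reference, the pattern repeated across the queue models in this diff: whenever a record representing pending work is created, the matching cron is woken immediately through ir.cron's `_trigger()` instead of waiting for its next timer tick. A minimal sketch of that pattern, assuming a hypothetical queue model and cron XML id (`example.work_queue` and `example.process_queue` are illustrative, not part of this codebase):

    from odoo import api, fields, models

    class WorkQueue(models.Model):
        _name = 'example.work_queue'  # hypothetical queue model, for illustration only
        payload = fields.Char()

        @api.model_create_multi
        def create(self, vals_list):
            # wake the processing cron as soon as work is enqueued, instead
            # of letting it poll on a one-minute timer; _trigger() also
            # accepts a datetime for delayed scheduling (used below for
            # retry_after and staging timeouts)
            self.env.ref('example.process_queue')._trigger()
            return super().create(vals_list)

With the crons trigger-driven, their timer intervals can be relaxed (to hours) since the timer becomes a fallback rather than the main scheduling mechanism.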
commit 229ae45e11

 conftest.py | 75
@@ -1,5 +1,6 @@
 from __future__ import annotations

+import datetime
 import select
 import shutil
 import threading
@@ -316,7 +317,8 @@ class DbDict(dict):
                 '-d', db, '-i', module + ',saas_worker,auth_oauth',
                 '--max-cron-threads', '0',
                 '--stop-after-init',
-                '--log-level', 'warn'
+                '--log-level', 'warn',
+                '--log-handler', 'py.warnings:ERROR',
             ],
             check=True,
             env={**os.environ, 'XDG_DATA_HOME': str(d)}
@@ -324,6 +326,7 @@ class DbDict(dict):
                 f.write(db)
                 f.flush()
                 os.fsync(f.fileno())
+                subprocess.run(['psql', db, '-c', "UPDATE ir_cron SET nextcall = 'infinity'"])

             return db

@@ -413,15 +416,58 @@ def dummy_addons_path():
     mod = pathlib.Path(dummy_addons_path, 'saas_worker')
     mod.mkdir(0o700)
     (mod / '__init__.py').write_text('''\
+import builtins
+import logging
+import threading
+
+import psycopg2
+
+import odoo
 from odoo import api, fields, models

+_logger = logging.getLogger(__name__)
+

 class Base(models.AbstractModel):
     _inherit = 'base'

     def run_crons(self):
+        builtins.forwardport_merged_before = self.env.context.get('forwardport_merged_before')
+        builtins.forwardport_updated_before = self.env.context.get('forwardport_updated_before')
         self.env['ir.cron']._process_jobs(self.env.cr.dbname)
+        del builtins.forwardport_updated_before
+        del builtins.forwardport_merged_before
         return True
+
+
+class IrCron(models.Model):
+    _inherit = 'ir.cron'
+
+    @classmethod
+    def _process_jobs(cls, db_name):
+        t = threading.current_thread()
+        try:
+            db = odoo.sql_db.db_connect(db_name)
+            t.dbname = db_name
+            with db.cursor() as cron_cr:
+                # FIXME: override `_get_all_ready_jobs` to directly lock the cron?
+                while jobs := next((
+                    job
+                    for j in cls._get_all_ready_jobs(cron_cr)
+                    if (job := cls._acquire_one_job(cron_cr, (j['id'],)))
+                ), None):
+                    # take into account overridings of _process_job() on that database
+                    registry = odoo.registry(db_name)
+                    registry[cls._name]._process_job(db, cron_cr, job)
+                    cron_cr.commit()
+
+        except psycopg2.ProgrammingError as e:
+            raise
+        except Exception:
+            _logger.warning('Exception in cron:', exc_info=True)
+        finally:
+            if hasattr(t, 'dbname'):
+                del t.dbname
 ''', encoding='utf-8')
     (mod / '__manifest__.py').write_text(pprint.pformat({
         'name': 'dummy saas_worker',
@@ -445,6 +491,7 @@ def addons_path(request, dummy_addons_path):
 def server(request, db, port, module, addons_path, tmpdir):
     log_handlers = [
         'odoo.modules.loading:WARNING',
+        'py.warnings:ERROR',
     ]
     if not request.config.getoption('--log-github'):
         log_handlers.append('github_requests:WARNING')
@@ -500,8 +547,8 @@ def server(request, db, port, module, addons_path, tmpdir):
         p.wait(timeout=30)

 @pytest.fixture
-def env(request, port, server, db, default_crons):
-    yield Environment(port, db, default_crons)
+def env(request, port, server, db):
+    yield Environment(port, db)
     if request.node.get_closest_marker('expect_log_errors'):
         if b"Traceback (most recent call last):" not in server[1]:
             pytest.fail("should have found error in logs.")
@@ -608,7 +655,6 @@ def _rate_limited(req):
         if not q.ok and q.headers.get('X-RateLimit-Remaining') == '0':
             reset = int(q.headers['X-RateLimit-Reset'])
             delay = max(0, round(reset - time.time() + 1.0))
-            print("Hit rate limit, sleeping for", delay, "seconds")
             time.sleep(delay)
             continue
         break
@@ -1190,11 +1236,10 @@ class LabelsProxy(collections.abc.MutableSet):
         assert r.ok, r.text

 class Environment:
-    def __init__(self, port, db, default_crons=()):
+    def __init__(self, port, db):
         self._uid = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/common'.format(port)).authenticate(db, 'admin', 'admin', {})
         self._object = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/object'.format(port))
         self._db = db
-        self._default_crons = default_crons

     def __call__(self, model, method, *args, **kwargs):
         return self._object.execute_kw(
@@ -1216,16 +1261,20 @@ class Environment:


     def run_crons(self, *xids, **kw):
-        crons = xids or self._default_crons
-        print('running crons', crons, file=sys.stderr)
+        crons = xids or ['runbot_merge.check_linked_prs_status']
+        cron_ids = []
         for xid in crons:
-            t0 = time.time()
-            print('\trunning cron', xid, '...', file=sys.stderr)
+            if xid is None:
+                continue
+
             model, cron_id = self('ir.model.data', 'check_object_reference', *xid.split('.', 1))
             assert model == 'ir.cron', "Expected {} to be a cron, got {}".format(xid, model)
-            self('ir.cron', 'method_direct_trigger', [cron_id], **kw)
-            print('\tdone %.3fs' % (time.time() - t0), file=sys.stderr)
-        print('done', file=sys.stderr)
+            cron_ids.append(cron_id)
+        if cron_ids:
+            self('ir.cron', 'write', cron_ids, {
+                'nextcall': (datetime.datetime.utcnow() - datetime.timedelta(seconds=30)).isoformat(" ", "seconds")
+            }, **kw)
+        self('base', 'run_crons', [], **kw)
         # sleep for some time as a lot of crap may have happened (?)
         wait_for_hook()

@@ -4,10 +4,11 @@
         <field name="model_id" ref="model_forwardport_batches"/>
         <field name="state">code</field>
         <field name="code">model._process()</field>
-        <field name="interval_number">1</field>
-        <field name="interval_type">minutes</field>
+        <field name="interval_number">6</field>
+        <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
         <field name="doall" eval="False"/>
+        <field name="priority">43</field>
     </record>

     <record model="ir.cron" id="updates">
@@ -15,10 +16,11 @@
         <field name="model_id" ref="model_forwardport_updates"/>
         <field name="state">code</field>
         <field name="code">model._process()</field>
-        <field name="interval_number">1</field>
-        <field name="interval_type">minutes</field>
+        <field name="interval_number">6</field>
+        <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
         <field name="doall" eval="False"/>
+        <field name="priority">46</field>
     </record>

     <record model="ir.cron" id="reminder">
@@ -37,7 +39,7 @@
         <field name="model_id" ref="model_forwardport_branch_remover"/>
         <field name="state">code</field>
         <field name="code">model._process()</field>
-        <field name="interval_number">1</field>
+        <field name="interval_number">6</field>
         <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
         <field name="doall" eval="False"/>
@@ -1,4 +1,5 @@
 # -*- coding: utf-8 -*-
+import builtins
 import contextlib
 import logging
 import re
@@ -76,6 +77,16 @@ class ForwardPortTasks(models.Model, Queue):
     retry_after = fields.Datetime(required=True, default='1900-01-01 01:01:01')
     pr_id = fields.Many2one('runbot_merge.pull_requests')

+    def create(self, vals_list):
+        self.env.ref('forwardport.port_forward')._trigger()
+        return super().create(vals_list)
+
+    def write(self, vals):
+        if retry := vals.get('retry_after'):
+            self.env.ref('forwardport.port_forward')\
+                ._trigger(fields.Datetime.to_datetime(retry))
+        return super().write(vals)
+
     def _search_domain(self):
         return super()._search_domain() + [
             ('retry_after', '<=', fields.Datetime.to_string(fields.Datetime.now())),
@@ -259,6 +270,10 @@ class UpdateQueue(models.Model, Queue):
     original_root = fields.Many2one('runbot_merge.pull_requests')
     new_root = fields.Many2one('runbot_merge.pull_requests')

+    def create(self, vals_list):
+        self.env.ref('forwardport.updates')._trigger()
+        return super().create(vals_list)
+
     def _process_item(self):
         previous = self.new_root
         sentry_sdk.set_tag("update-root", self.new_root.display_name)
@@ -344,8 +359,12 @@ class DeleteBranches(models.Model, Queue):

     pr_id = fields.Many2one('runbot_merge.pull_requests')

+    def create(self, vals_list):
+        self.env.ref('forwardport.remover')._trigger(datetime.now() - MERGE_AGE)
+        return super().create(vals_list)
+
     def _search_domain(self):
-        cutoff = self.env.context.get('forwardport_merged_before') \
+        cutoff = getattr(builtins, 'forwardport_merged_before', None) \
              or fields.Datetime.to_string(datetime.now() - MERGE_AGE)
         return [('pr_id.merge_date', '<', cutoff)]

@@ -13,6 +13,7 @@ it up), ...
 """
 from __future__ import annotations

+import builtins
 import datetime
 import itertools
 import json
@@ -487,7 +488,7 @@ stderr:
         ], order='source_id, id'), lambda p: p.source_id)

     def _reminder(self):
-        cutoff = self.env.context.get('forwardport_updated_before') \
+        cutoff = getattr(builtins, 'forwardport_updated_before', None) \
              or fields.Datetime.to_string(datetime.datetime.now() - DEFAULT_DELTA)
         cutoff_dt = fields.Datetime.from_string(cutoff)

@@ -501,8 +502,6 @@ stderr:
             pr_ids = {p.id for p in prs}
             for pr in prs:
                 pr_ids.discard(pr.parent_id.id)
-            print(source, prs, [p.parent_id for p in prs],
-                  '\n\t->', pr_ids, flush=True)
             for pr in (p for p in prs if p.id in pr_ids):
                 self.env.ref('runbot_merge.forwardport.reminder')._send(
                     repository=pr.repository,
@@ -22,5 +22,7 @@ class FreezeWizard(models.Model):
     def unlink(self):
         r = super().unlink()
         if not (self.env.context.get('forwardport_keep_disabled') or self.search_count([])):
-            self.env.ref('forwardport.port_forward').active = True
+            cron = self.env.ref('forwardport.port_forward')
+            cron.active = True
+            cron._trigger()  # process forward ports enqueued during the freeze period
         return r
@@ -4,18 +4,6 @@ import re
 import pytest
 import requests

-@pytest.fixture
-def default_crons():
-    return [
-        'runbot_merge.process_updated_commits',
-        'runbot_merge.merge_cron',
-        'runbot_merge.staging_cron',
-        'forwardport.port_forward',
-        'forwardport.updates',
-        'runbot_merge.check_linked_prs_status',
-        'runbot_merge.feedback_cron',
-    ]
-
 # public_repo — necessary to leave comments
 # admin:repo_hook — to set up hooks (duh)
 # delete_repo — to cleanup repos created under a user
@@ -123,7 +123,7 @@ def test_straightforward_flow(env, config, make_repo, users):
         prod.post_status(pr1.head, 'success', 'legal/cla')

     env.run_crons()
-    env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
+    env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})

     pr0_, pr1_, pr2 = env['runbot_merge.pull_requests'].search([], order='number')

@@ -329,8 +329,8 @@ def test_empty(env, config, make_repo, users):
     assert project.fp_github_name == users['other']

     # check reminder
-    env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
-    env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
+    env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
+    env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})

     awaiting = (
         users['other'],
@@ -374,7 +374,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
     # check that this stops if we close the PR
     with prod:
         fail_pr.close()
-    env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
+    env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
     assert pr1.comments == [
         (users['reviewer'], 'hansen r+'),
         seen(env, pr1, users),
@@ -597,14 +597,14 @@ def test_disapproval(env, config, make_repo, users):
         prod.post_status(pr2_id.head, 'success')
         pr2.post_comment('hansen r+', token=config['role_other']['token'])
     # no point creating staging for our needs, just propagate statuses
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert pr1_id.state == 'ready'
     assert pr2_id.state == 'ready'

     # oh no, pr1 has an error!
     with prod:
         pr1.post_comment('hansen r-', token=config['role_other']['token'])
-    env.run_crons('runbot_merge.feedback_cron')
+    env.run_crons(None)
     assert pr1_id.state == 'validated', "pr1 should not be approved anymore"
     assert pr2_id.state == 'ready', "pr2 should not be affected"

@@ -899,7 +899,7 @@ class TestClosing:
         prod.post_status(pr1_id.head, 'success', 'legal/cla')
         prod.post_status(pr1_id.head, 'success', 'ci/runbot')
         env.run_crons()
-        env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron')
+        env.run_crons('forwardport.reminder')

         assert env['runbot_merge.pull_requests'].search([], order='number') == pr0_id | pr1_id,\
             "closing the PR should suppress the FP sequence"
@@ -695,7 +695,7 @@ def test_retarget_after_freeze(env, config, make_repo, users):
         port_pr.base = 'bprime'
     assert port_id.target == new_branch

-    env.run_crons('forwardport.port_forward')
+    env.run_crons(None)
     assert not job.exists(), "job should have succeeded and apoptosed"

     # since the PR was "already forward-ported" to the new branch it should not
@@ -816,11 +816,7 @@ def test_freeze(env, config, make_repo, users):
     # re-enable forward-port cron after freeze
     _, cron_id = env['ir.model.data'].check_object_reference('forwardport', 'port_forward', context={'active_test': False})
     env['ir.cron'].browse([cron_id]).active = True
-    # run crons to process the feedback, run a second time in case of e.g.
-    # forward porting
-    env.run_crons()
-    env.run_crons()
+    env.run_crons('forwardport.port_forward')

     assert release_id.state == 'merged'
     assert not env['runbot_merge.pull_requests'].search([
@@ -896,7 +892,7 @@ def test_missing_magic_ref(env, config, make_repo):
     # check that the batch is still here and targeted for the future
     req = env['forwardport.batches'].search([])
     assert len(req) == 1
-    assert req.retry_after > datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
+    assert req.retry_after > datetime.utcnow().isoformat(" ", "seconds")
     # reset retry_after
     req.retry_after = '1900-01-01 01:01:01'

@@ -905,7 +901,7 @@ def test_missing_magic_ref(env, config, make_repo):
         [c2] = prod.make_commits(a_head.id, Commit('y', tree={'x': '0'}))
         assert c2 != c
         pr_id.head = c2
-    env.run_crons()
+    env.run_crons(None)

     fp_id = env['runbot_merge.pull_requests'].search([('source_id', '=', pr_id.id)])
     assert fp_id
@@ -1161,7 +1157,7 @@ def test_reminder_detached(env, config, make_repo, users):
     pr_c = prod.get_pr(pr_c_id.number)

     # region sanity check
-    env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
+    env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})

     assert pr_b.comments == [
         seen(env, pr_b, users),
@@ -1195,12 +1191,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port

     # region check detached
     pr_c_id.write({'parent_id': False, 'detach_reason': 'because'})
-    env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
+    env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})

-    for p in env['runbot_merge.pull_requests'].search_read(
-            [], ['id', 'parent_id', 'source_id', 'display_name']
-    ):
-        print(p, flush=True)
-
     assert pr_b.comments[2:] == [
         (users['user'], "@%s @%s child PR %s was modified / updated and has become a normal PR. This PR (and any of its parents) will need to be merged independently as approvals won't cross." % (
@@ -4,30 +4,33 @@
         <field name="model_id" ref="model_runbot_merge_project"/>
         <field name="state">code</field>
         <field name="code">model._check_stagings(True)</field>
-        <field name="interval_number">1</field>
-        <field name="interval_type">minutes</field>
+        <field name="interval_number">6</field>
+        <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
         <field name="doall" eval="False"/>
+        <field name="priority">30</field>
     </record>
     <record model="ir.cron" id="staging_cron">
         <field name="name">Check for progress of PRs and create Stagings</field>
         <field name="model_id" ref="model_runbot_merge_project"/>
         <field name="state">code</field>
         <field name="code">model._create_stagings(True)</field>
-        <field name="interval_number">1</field>
-        <field name="interval_type">minutes</field>
+        <field name="interval_number">6</field>
+        <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
         <field name="doall" eval="False"/>
+        <field name="priority">40</field>
     </record>
     <record model="ir.cron" id="feedback_cron">
         <field name="name">Send feedback to PR</field>
         <field name="model_id" ref="model_runbot_merge_pull_requests_feedback"/>
         <field name="state">code</field>
         <field name="code">model._send()</field>
-        <field name="interval_number">1</field>
-        <field name="interval_type">minutes</field>
+        <field name="interval_number">6</field>
+        <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
         <field name="doall" eval="False"/>
+        <field name="priority">60</field>
     </record>
     <record model="ir.cron" id="labels_cron">
         <field name="name">Update labels on PR</field>
@@ -38,16 +41,18 @@
         <field name="interval_type">minutes</field>
         <field name="numbercall">-1</field>
         <field name="doall" eval="False"/>
+        <field name="priority">70</field>
     </record>
     <record model="ir.cron" id="fetch_prs_cron">
         <field name="name">Check for PRs to fetch</field>
         <field name="model_id" ref="model_runbot_merge_fetch_job"/>
         <field name="state">code</field>
         <field name="code">model._check(True)</field>
-        <field name="interval_number">1</field>
-        <field name="interval_type">minutes</field>
+        <field name="interval_number">6</field>
+        <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
         <field name="doall" eval="False"/>
+        <field name="priority">10</field>
     </record>
     <record model="ir.cron" id="check_linked_prs_status">
         <field name="name">Warn on linked PRs where only one is ready</field>
@@ -58,15 +63,17 @@
         <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
         <field name="doall" eval="False"/>
+        <field name="priority">50</field>
     </record>
     <record model="ir.cron" id="process_updated_commits">
         <field name="name">Impact commit statuses on PRs and stagings</field>
         <field name="model_id" ref="model_runbot_merge_commit"/>
         <field name="state">code</field>
         <field name="code">model._notify()</field>
-        <field name="interval_number">30</field>
-        <field name="interval_type">minutes</field>
+        <field name="interval_number">6</field>
+        <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
         <field name="doall" eval="False"/>
+        <field name="priority">20</field>
     </record>
 </odoo>
@@ -223,13 +223,15 @@ class Batch(models.Model):
                     failed and f"{failed} have failed CI",
                 ]))
             else:
-                if batch.blocked and batch.cancel_staging:
-                    if splits := batch.target.split_ids:
-                        splits.unlink()
-                    batch.target.active_staging_id.cancel(
-                        'unstaged by %s becoming ready',
-                        ', '.join(batch.prs.mapped('display_name')),
-                    )
+                if batch.blocked:
+                    self.env.ref("runbot_merge.staging_cron")._trigger()
+                    if batch.cancel_staging:
+                        if splits := batch.target.split_ids:
+                            splits.unlink()
+                        batch.target.active_staging_id.cancel(
+                            'unstaged by %s becoming ready',
+                            ', '.join(batch.prs.mapped('display_name')),
+                        )
                 batch.blocked = False


@@ -9,10 +9,11 @@ class BranchCleanup(models.TransientModel):
     _description = "cleans up scratch refs for deactivated branches"

     def _run(self):
-        deactivated = self.env['runbot_merge.branch'].search([
-            ('active', '=', False),
-            ('write_date', '>=', self.env.context['lastcall']),
-        ])
+        domain = [('active', '=', False)]
+        if lastcall := self.env.context['lastcall']:
+            domain.append(('write_date', '>=', lastcall))
+        deactivated = self.env['runbot_merge.branch'].search(domain)

         _logger.info(
             "deleting scratch (tmp and staging) refs for branches %s",
             ', '.join(b.name for b in deactivated)
@@ -29,4 +30,4 @@ class BranchCleanup(models.TransientModel):
                 _logger.info("no tmp branch found for %s:%s", r.name, b.name)
             res = gh('delete', f'git/refs/heads/staging.{b.name}', check=False)
             if res.status_code != 204:
-                _logger.info("no staging branch found for %s:%s", res.name, b.name)
+                _logger.info("no staging branch found for %s:%s", r.name, b.name)
@@ -1774,6 +1774,12 @@ class Feedback(models.Model):
         help="Token field (from repo's project) to use to post messages"
     )

+    @api.model_create_multi
+    def create(self, vals_list):
+        # any time a feedback is created, it can be sent
+        self.env.ref('runbot_merge.feedback_cron')._trigger()
+        return super().create(vals_list)
+
     def _send(self):
         ghs = {}
         to_remove = []
@@ -2056,16 +2062,28 @@ class Stagings(models.Model):
             for context, status in statuses.get(commit.sha, {}).items()
         ]

+    def write(self, vals):
+        if timeout := vals.get('timeout_limit'):
+            self.env.ref("runbot_merge.merge_cron")\
+                ._trigger(fields.Datetime.to_datetime(timeout))
+
+        if vals.get('active') is False:
+            self.env.ref("runbot_merge.staging_cron")._trigger()
+
+        return super().write(vals)
+
     # only depend on staged_at as it should not get modified, but we might
     # update the CI timeout after the staging have been created and we
     # *do not* want to update the staging timeouts in that case
     @api.depends('staged_at')
     def _compute_timeout_limit(self):
+        timeouts = set()
         for st in self:
-            st.timeout_limit = fields.Datetime.to_string(
-                fields.Datetime.from_string(st.staged_at)
-                + datetime.timedelta(minutes=st.target.project_id.ci_timeout)
-            )
+            t = st.timeout_limit = st.staged_at + datetime.timedelta(minutes=st.target.project_id.ci_timeout)
+            timeouts.add(t)
+        if timeouts:
+            # we might have very different limits for each staging so need to schedule them all
+            self.env.ref("runbot_merge.merge_cron")._trigger_list(timeouts)

     @api.depends('batch_ids.prs')
     def _compute_prs(self):
@@ -2141,9 +2159,11 @@ class Stagings(models.Model):

             s.state = st
             if s.state != 'pending':
+                self.env.ref("runbot_merge.merge_cron")._trigger()
                 s.staging_end = fields.Datetime.now()
             if update_timeout_limit:
-                s.timeout_limit = fields.Datetime.to_string(datetime.datetime.now() + datetime.timedelta(minutes=s.target.project_id.ci_timeout))
+                s.timeout_limit = datetime.datetime.now() + datetime.timedelta(minutes=s.target.project_id.ci_timeout)
+                self.env.ref("runbot_merge.merge_cron")._trigger(s.timeout_limit)
                 _logger.debug("%s got pending status, bumping timeout to %s (%s)", self, s.timeout_limit, cmap)

     def action_cancel(self):
@@ -2424,6 +2444,11 @@ class FetchJob(models.Model):
     number = fields.Integer(required=True, group_operator=None)
     closing = fields.Boolean(default=False)

+    @api.model_create_multi
+    def create(self, vals_list):
+        self.env.ref('runbot_merge.fetch_prs_cron')._trigger()
+        return super().create(vals_list)
+
     def _check(self, commit=False):
         """
         :param bool commit: commit after each fetch has been executed
@@ -4,23 +4,6 @@ import pytest
 def module():
     return 'runbot_merge'

-@pytest.fixture
-def default_crons():
-    return [
-        # env['runbot_merge.project']._check_fetch()
-        'runbot_merge.fetch_prs_cron',
-        # env['runbot_merge.commit']._notify()
-        'runbot_merge.process_updated_commits',
-        # env['runbot_merge.project']._check_stagings()
-        'runbot_merge.merge_cron',
-        # env['runbot_merge.project']._create_stagings()
-        'runbot_merge.staging_cron',
-        # env['runbot_merge.pull_requests']._check_linked_prs_statuses()
-        'runbot_merge.check_linked_prs_status',
-        # env['runbot_merge.pull_requests.feedback']._send()
-        'runbot_merge.feedback_cron',
-    ]
-
 @pytest.fixture
 def project(env, config):
     return env['runbot_merge.project'].create({
@@ -3,6 +3,7 @@ import itertools
 import json
 import textwrap
 import time
+from typing import Callable
 from unittest import mock

 import pytest
@@ -13,8 +14,9 @@ import odoo
 from utils import _simple_init, seen, matches, get_partner, Commit, pr_page, to_pr, part_of, ensure_one

 @pytest.fixture(autouse=True)
-def _configure_statuses(project, repo):
-    project.repo_ids.required_statuses = 'legal/cla,ci/runbot'
+def _configure_statuses(request, project, repo):
+    if 'defaultstatuses' not in request.keywords:
+        project.repo_ids.required_statuses = 'legal/cla,ci/runbot'

 @pytest.fixture(autouse=True, params=["statuses", "rpc"])
 def stagings(request, env, project, repo):
@@ -562,19 +564,32 @@ def test_staging_conflict_second(env, repo, users, config):
     assert pr1_id.state == 'error', "now pr1 should be in error"


-def test_staging_ci_timeout(env, repo, config, page):
+@pytest.mark.defaultstatuses
+@pytest.mark.parametrize('update_op', [
+    pytest.param(
+        lambda _: {'timeout_limit': datetime.datetime.now().isoformat(" ", "seconds")},
+        id="set-timeout-limit",
+    ),
+    pytest.param(
+        lambda timeout: {'staged_at': (datetime.datetime.now() - datetime.timedelta(minutes=2*timeout)).isoformat(" ", "seconds")},
+        id="set-staged-at",
+    ),
+])
+def test_staging_ci_timeout(env, repo, config, page, update_op: Callable[[int], dict]):
     """If a staging timeouts (~ delay since staged greater than
     configured)... requeue?
     """
     with repo:
-        m = repo.make_commit(None, 'initial', None, tree={'f': 'm'})
+        m, _, c2 = repo.make_commits(
+            None,
+            Commit('initial', tree={'f': 'm'}),
+            Commit('first', tree={'f': 'c1'}),
+            Commit('second', tree={'f': 'c2'}),
+        )
         repo.make_ref('heads/master', m)

-        c1 = repo.make_commit(m, 'first', None, tree={'f': 'c1'})
-        c2 = repo.make_commit(c1, 'second', None, tree={'f': 'c2'})
         pr = repo.make_pr(title='title', body='body', target='master', head=c2)
-        repo.post_status(pr.head, 'success', 'ci/runbot')
-        repo.post_status(pr.head, 'success', 'legal/cla')
+        repo.post_status(pr.head, 'success')
         pr.post_comment('hansen r+ rebase-merge', config['role_reviewer']['token'])
     env.run_crons()

@@ -582,23 +597,26 @@ def test_staging_ci_timeout(env, repo, config, page):
     assert pr_id.staging_id
     timeout = env['runbot_merge.project'].search([]).ci_timeout

-    pr_id.staging_id.staged_at = odoo.fields.Datetime.to_string(datetime.datetime.now() - datetime.timedelta(minutes=2*timeout))
-    env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+    pr_id.staging_id.write(update_op(timeout))
+    env.run_crons(None)
     assert pr_id.state == 'error', "timeout should fail the PR"

     dangerbox = pr_page(page, pr).cssselect('.alert-danger span')
     assert dangerbox
     assert dangerbox[0].text == 'timed out (>60 minutes)'

+@pytest.mark.defaultstatuses
 def test_timeout_bump_on_pending(env, repo, config):
     with repo:
-        m = repo.make_commit(None, 'initial', None, tree={'f': '0'})
+        [m, c] = repo.make_commits(
+            None,
+            Commit('initial', tree={'f': '0'}),
+            Commit('c', tree={'f': '1'}),
+        )
         repo.make_ref('heads/master', m)

-        c = repo.make_commit(m, 'c', None, tree={'f': '1'})
         prx = repo.make_pr(title='title', body='body', target='master', head=c)
-        repo.post_status(prx.head, 'success', 'ci/runbot')
-        repo.post_status(prx.head, 'success', 'legal/cla')
+        repo.post_status(prx.head, 'success')
         prx.post_comment('hansen r+', config['role_reviewer']['token'])
     env.run_crons()

@@ -606,8 +624,8 @@ def test_timeout_bump_on_pending(env, repo, config):
     old_timeout = odoo.fields.Datetime.to_string(datetime.datetime.now() - datetime.timedelta(days=15))
     st.timeout_limit = old_timeout
     with repo:
-        repo.post_status('staging.master', 'pending', 'ci/runbot')
-    env.run_crons('runbot_merge.process_updated_commits')
+        repo.post_status('staging.master', 'pending')
+    env.run_crons(None)
     assert st.timeout_limit > old_timeout

 def test_staging_ci_failure_single(env, repo, users, config, page):
@@ -1092,63 +1110,63 @@ def test_reopen_merged_pr(env, repo, config, users):
     ]

 class TestNoRequiredStatus:
+    @pytest.mark.defaultstatuses
     def test_basic(self, env, repo, config):
         """ check that mergebot can work on a repo with no CI at all
         """
         env['runbot_merge.repository'].search([('name', '=', repo.name)]).status_ids = False
         with repo:
-            m = repo.make_commit(None, 'initial', None, tree={'0': '0'})
+            [m, c] = repo.make_commits(
+                None,
+                Commit('initial', tree={'0': '0'}),
+                Commit('first', tree={'0': '1'}),
+            )
             repo.make_ref('heads/master', m)

-            c = repo.make_commit(m, 'first', None, tree={'0': '1'})
-            prx = repo.make_pr(title='title', body='body', target='master', head=c)
-            prx.post_comment('hansen r+', config['role_reviewer']['token'])
+            pr = repo.make_pr(title='title', body='body', target='master', head=c)
+            pr.post_comment('hansen r+', config['role_reviewer']['token'])
         env.run_crons()

-        pr = env['runbot_merge.pull_requests'].search([
-            ('repository.name', '=', repo.name),
-            ('number', '=', prx.number)
-        ])
-        assert pr.state == 'ready'
-        st = pr.staging_id
-        assert st
-        env.run_crons()
+        pr_id = to_pr(env, pr)
+        st = env['runbot_merge.stagings'].search([], context={'active_test': False})
         assert st.state == 'success'
-        assert pr.state == 'merged'
+        assert pr_id.state == 'merged'

+    @pytest.mark.defaultstatuses
     def test_updated(self, env, repo, config):
         env['runbot_merge.repository'].search([('name', '=', repo.name)]).status_ids = False
         with repo:
-            m = repo.make_commit(None, 'initial', None, tree={'0': '0'})
+            [m, c] = repo.make_commits(
+                None,
+                Commit('initial', tree={'0': '0'}),
+                Commit('first', tree={'0': '1'}),
+            )
             repo.make_ref('heads/master', m)

-            c = repo.make_commit(m, 'first', None, tree={'0': '1'})
-            prx = repo.make_pr(title='title', body='body', target='master', head=c)
+            pr = repo.make_pr(title='title', body='body', target='master', head=c)
         env.run_crons()

-        pr = env['runbot_merge.pull_requests'].search([
-            ('repository.name', '=', repo.name),
-            ('number', '=', prx.number)
-        ])
-        assert pr.state == 'validated'
+        pr_id = to_pr(env, pr)
+        assert pr_id.state == 'validated'

         # normal push
         with repo:
-            repo.make_commits(c, repo.Commit('second', tree={'0': '2'}), ref=prx.ref)
+            repo.make_commits(c, repo.Commit('second', tree={'0': '2'}), ref=pr.ref)
         env.run_crons()
-        assert pr.state == 'validated'
+        assert pr_id.state == 'validated'
         with repo:
-            prx.post_comment('hansen r+', config['role_reviewer']['token'])
-        assert pr.state == 'ready'
+            pr.post_comment('hansen r+', config['role_reviewer']['token'])
+        assert pr_id.state == 'ready'

         # force push
         with repo:
-            repo.make_commits(m, repo.Commit('xxx', tree={'0': 'm'}), ref=prx.ref)
+            repo.make_commits(m, repo.Commit('xxx', tree={'0': 'm'}), ref=pr.ref)
         env.run_crons()
-        assert pr.state == 'validated'
+        assert pr_id.state == 'validated'
         with repo:
-            prx.post_comment('hansen r+', config['role_reviewer']['token'])
-        assert pr.state == 'ready'
+            pr.post_comment('hansen r+', config['role_reviewer']['token'])
+        assert pr_id.state == 'ready'

 class TestRetry:
     @pytest.mark.xfail(reason="This may not be a good idea as it could lead to tons of rebuild spam")
@@ -1230,7 +1248,7 @@ class TestRetry:
         with repo:
             pr.post_comment('hansen retry', config['role_' + retrier]['token'])
         assert pr_id.state == 'ready'
-        env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+        env.run_crons(None)

         staging_head2 = repo.commit('heads/staging.master')
         assert staging_head2 != staging_head
@@ -1263,7 +1281,7 @@ class TestRetry:

         with repo:
             pr.post_comment('hansen retry', config['role_reviewer']['token'])
-        env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+        env.run_crons(None)

         with repo:
             repo.post_status('staging.master', 'success', 'legal/cla')
@@ -1293,42 +1311,36 @@ class TestRetry:
             (users['user'], "@{reviewer} retry makes no sense when the PR is not in error.".format_map(users)),
         ]

+    @pytest.mark.defaultstatuses
     @pytest.mark.parametrize('disabler', ['user', 'other', 'reviewer'])
     def test_retry_disable(self, env, repo, disabler, users, config):
         with repo:
             prx = _simple_init(repo)
-            repo.post_status(prx.head, 'success', 'ci/runbot')
-            repo.post_status(prx.head, 'success', 'legal/cla')
+            repo.post_status(prx.head, 'success')
             prx.post_comment('hansen r+ delegate=%s rebase-merge' % users['other'],
                              config["role_reviewer"]['token'])
         env.run_crons()
-        assert env['runbot_merge.pull_requests'].search([
-            ('repository.name', '=', repo.name),
-            ('number', '=', prx.number)
-        ]).staging_id
+        pr_id = to_pr(env, prx)
+        staging_id = pr_id.staging_id
+        assert staging_id

-        staging_head = repo.commit('heads/staging.master')
         with repo:
-            repo.post_status('staging.master', 'success', 'legal/cla')
-            repo.post_status('staging.master', 'failure', 'ci/runbot')
+            repo.post_status('staging.master', 'failure')
         env.run_crons()
-        pr = env['runbot_merge.pull_requests'].search([
-            ('repository.name', '=', repo.name),
-            ('number', '=', prx.number)
-        ])
-        assert pr.state == 'error'
+        assert staging_id.state == 'failure'
+        assert not staging_id.active
+        assert pr_id.state == 'error'

         with repo:
             prx.post_comment('hansen r-', config['role_' + disabler]['token'])
-        assert pr.state == 'validated'
+        assert pr_id.state == 'validated'
         with repo:
             repo.make_commit(prx.ref, 'third', None, tree={'m': 'c3'})
         # just in case, apparently in some case the first post_status uses the old head...
         with repo:
-            repo.post_status(prx.head, 'success', 'ci/runbot')
-            repo.post_status(prx.head, 'success', 'legal/cla')
+            repo.post_status(prx.head, 'success')
         env.run_crons()
-        assert pr.state == 'validated'
+        assert pr_id.state == 'validated'

 class TestMergeMethod:
     """
@@ -1759,7 +1771,7 @@ commits, I need to know how to merge it:

         c0 = repo.make_commit(m, 'C0', None, tree={'a': 'b'})
         prx = repo.make_pr(title="gibberish", body="blahblah", target='master', head=c0)
-        env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+        env.run_crons(None)

         with repo:
             repo.post_status(prx.head, 'success', 'legal/cla')
@@ -1801,7 +1813,7 @@ commits, I need to know how to merge it:

         c0 = repo.make_commit(m, 'C0', None, tree={'a': 'b'})
         prx = repo.make_pr(title="gibberish", body=None, target='master', head=c0)
-        env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+        env.run_crons(None)

         with repo:
             repo.post_status(prx.head, 'success', 'legal/cla')
@@ -2463,7 +2475,7 @@ class TestPRUpdate(object):

         env.run_crons()
         pr_id = to_pr(env, pr)
-        env.run_crons('runbot_merge.process_updated_commits')
+        env.run_crons(None)
         assert pr_id.message == 'title\n\nbody'
         assert pr_id.state == 'ready'
         old_reviewer = pr_id.reviewed_by
@@ -3008,7 +3020,7 @@ class TestBatching(object):
             reviewer=None,
         )
         pr02.post_comment('hansen alone r+', config['role_reviewer']['token'])
-        env.run_crons('runbot_merge.process_updated_commits')
+        env.run_crons(None)
         pr01_id = to_pr(env, pr01)
         assert pr01_id.blocked is False
         pr02_id = to_pr(env, pr02)
@@ -3077,7 +3089,7 @@ class TestBatching(object):
     with repo:
         repo.post_status('staging.master', 'success', 'ci/runbot')
         repo.post_status('staging.master', 'success', 'legal/cla')
-    env.run_crons('runbot_merge.process_updated_commits', 'runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+    env.run_crons(None)
     assert pr2.state == 'merged'
 
 
 class TestReviewing:
@@ -3534,7 +3546,6 @@ class TestUnknownPR:
         prx.post_comment('hansen r+', config['role_reviewer']['token'])
     env.run_crons(
         'runbot_merge.fetch_prs_cron',
-        'runbot_merge.feedback_cron',
     )
 
     assert prx.comments == [
@@ -3559,7 +3570,6 @@ class TestUnknownPR:
         prx.post_review('APPROVE', 'hansen r+', config['role_reviewer']['token'])
     env.run_crons(
         'runbot_merge.fetch_prs_cron',
-        'runbot_merge.feedback_cron',
    )
 
     # FIXME: either split out reviews in local or merge reviews & comments in remote
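The explicit `runbot_merge.feedback_cron` entry is dropped in these two hunks because, once trigger-driven, recording feedback wakes the sender cron on its own, so the tests no longer need to name it. The same `_trigger` helper also accepts a datetime for deferred wake-ups; a hedged one-liner (the cron XML id is real, this particular scheduling use is an assumption):

    from datetime import timedelta
    from odoo import fields

    # ask the cron to run in about a minute rather than immediately
    env.ref('runbot_merge.feedback_cron')._trigger(fields.Datetime.now() + timedelta(minutes=1))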
@@ -34,15 +34,15 @@ def test_status_applies(env, repo, config):
 
     with repo:
         repo.post_status(c, 'success', 'ci')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert pr_id.state == 'opened'
     with repo:
         repo.post_status(c, 'success', 'pr')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert pr_id.state == 'opened'
     with repo:
         repo.post_status(c, 'success', 'lint')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert pr_id.state == 'validated'
 
     with repo:
@@ -53,15 +53,15 @@ def test_status_applies(env, repo, config):
     assert st.state == 'pending'
     with repo:
         repo.post_status('staging.master', 'success', 'ci')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert st.state == 'pending'
     with repo:
         repo.post_status('staging.master', 'success', 'lint')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert st.state == 'pending'
     with repo:
         repo.post_status('staging.master', 'success', 'staging')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert st.state == 'success'
 
 @pytest.mark.usefixtures('_setup_statuses')
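These status tests exercise per-repository required statuses: a PR only becomes 'validated' once every context required on PRs is green, and a staging only flips to 'success' once every context required on stagings is green. A sketch of the kind of configuration the `_setup_statuses` fixture presumably installs (model and field names are guesses inferred from the test behaviour):

    env['runbot_merge.repository'].search([]).write({
        'status_ids': [
            (0, 0, {'context': 'ci'}),                       # required on PRs and stagings
            (0, 0, {'context': 'pr', 'stagings': False}),    # required on PRs only
            (0, 0, {'context': 'lint'}),
            (0, 0, {'context': 'staging', 'prs': False}),    # required on stagings only
        ],
    })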
@@ -84,11 +84,11 @@ def test_status_skipped(env, project, repo, config):
 
     with repo:
         repo.post_status(c, 'success', 'ci')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert pr_id.state == 'opened'
     with repo:
         repo.post_status(c, 'success', 'pr')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert pr_id.state == 'validated'
 
     with repo:
@@ -99,11 +99,11 @@ def test_status_skipped(env, project, repo, config):
     assert st.state == 'pending'
     with repo:
         repo.post_status('staging.maintenance', 'success', 'staging')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert st.state == 'pending'
     with repo:
         repo.post_status('staging.maintenance', 'success', 'ci')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert st.state == 'success'
 
 def test_pseudo_version_tag(env, project, make_repo, setreviewers, config):
@@ -45,10 +45,21 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf
     staging_id = branch_id.active_staging_id
     assert staging_id == pr_id.staging_id
 
+    # staging of `pr` should have generated a staging branch
+    _ = repo.get_ref('heads/staging.other')
+    # stagings should not need a tmp branch anymore, so this should not exist
+    with pytest.raises(AssertionError, match=r'Not Found'):
+        repo.get_ref('heads/tmp.other')
+
     # disable branch "other"
     branch_id.active = False
     env.run_crons()
 
+    # triggered cleanup should have deleted the staging for the disabled `other`
+    # target branch
+    with pytest.raises(AssertionError, match=r'Not Found'):
+        repo.get_ref('heads/staging.other')
+
     # the PR should not have been closed implicitly
     assert pr_id.state == 'ready'
     # but it should be unstaged
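The new assertions verify that flipping `branch_id.active` is now sufficient on its own: the write queues the cleanup, and the following `env.run_crons()` deletes the staging branch of the disabled target, where the old test had to invoke `run_crons` again explicitly. A minimal sketch of how such a write-side trigger could be wired, assuming the branch model is `runbot_merge.branch` (the override is illustrative; only `ir.cron._trigger` is the real API):

    from odoo import models

    class Branch(models.Model):
        _inherit = 'runbot_merge.branch'  # assumed model name

        def write(self, vals):
            res = super().write(vals)
            if 'active' in vals:
                # deactivating a branch must cancel its stagings and
                # clean up the staging refs as soon as possible
                self.env.ref('runbot_merge.staging_cron')._trigger()
            return res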
@@ -86,17 +97,10 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf
     assert pr_id.staging_id
 
     # staging of `pr` should have generated a staging branch
-    _ = repo.get_ref('heads/staging.other')
+    _ = repo.get_ref('heads/staging.other2')
     # stagings should not need a tmp branch anymore, so this should not exist
     with pytest.raises(AssertionError, match=r'Not Found'):
-        repo.get_ref('heads/tmp.other')
-
-    assert env['base'].run_crons()
-
-    # triggered cleanup should have deleted the staging for the disabled `other`
-    # target branch
-    with pytest.raises(AssertionError, match=r'Not Found'):
-        repo.get_ref('heads/staging.other')
+        repo.get_ref('heads/tmp.other2')
 
 
 def test_new_pr_no_branch(env, project, make_repo, setreviewers, users):
     """ A new PR to an *unknown* branch should be ignored and warn
@@ -493,7 +493,7 @@ def test_ff_fail(env, project, repo_a, repo_b, config):
     with repo_a, repo_b:
         repo_a.post_status('heads/staging.master', 'success')
         repo_b.post_status('heads/staging.master', 'success')
-    env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+    env.run_crons(None)
     assert repo_b.commit('heads/master').id == cn,\
         "B should still be at the conflicting commit"
     assert repo_a.commit('heads/master').id == root_a,\
@@ -1,3 +1,4 @@
+import datetime
 import functools
 from itertools import repeat
 
@@ -66,16 +67,16 @@ def test_staging_priority(env, project, repo, config, mode, cutoff, second):
     with repo:
         [m] = repo.make_commits(None, Commit("m", tree={"ble": "1"}), ref="heads/master")
 
-        [c] = repo.make_commits(m, Commit("c", tree={"1": "1"}), ref="heads/pr1")
+        repo.make_commits(m, Commit("c", tree={"1": "1"}), ref="heads/pr1")
         pr1 = repo.make_pr(title="whatever", target="master", head="pr1")
 
-        [c] = repo.make_commits(m, Commit("c", tree={"2": "2"}), ref="heads/pr2")
+        repo.make_commits(m, Commit("c", tree={"2": "2"}), ref="heads/pr2")
         pr2 = repo.make_pr(title="whatever", target="master", head="pr2")
 
-        [c] = repo.make_commits(m, Commit("c", tree={"3": "3"}), ref="heads/pr3")
+        repo.make_commits(m, Commit("c", tree={"3": "3"}), ref="heads/pr3")
         pr3 = repo.make_pr(title="whatever", target="master", head="pr3")
 
-        [c] = repo.make_commits(m, Commit("c", tree={"4": "4"}), ref="heads/pr4")
+        repo.make_commits(m, Commit("c", tree={"4": "4"}), ref="heads/pr4")
         pr4 = repo.make_pr(title="whatever", target="master", head="pr4")
 
     prs = [pr1, pr2, pr3, pr4]
@@ -100,18 +101,27 @@ def test_staging_priority(env, project, repo, config, mode, cutoff, second):
     for pr, pr_id in zip(prs[cutoff:], pr_ids[cutoff:]):
         pr.post_comment('hansen r+', config['role_reviewer']['token'])
         repo.post_status(pr_id.head, 'success')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert not pr_ids.filtered(lambda p: p.blocked)
 
     # trigger a split
     with repo:
         repo.post_status('staging.master', 'failure')
-    env.run_crons('runbot_merge.process_updated_commits', 'runbot_merge.merge_cron')
+
+    # specifically delay creation of new staging to observe the failed
+    # staging's state and the splits
+    model, cron_id = env['ir.model.data'].check_object_reference('runbot_merge', 'staging_cron')
+    staging_cron = env[model].browse([cron_id])
+    staging_cron.active = False
+
+    env.run_crons(None)
     assert not staging.active
     assert not env['runbot_merge.stagings'].search([]).active
     assert env['runbot_merge.split'].search_count([]) == 2
 
-    env.run_crons()
+    staging_cron.active = True
+    # manually trigger that cron, as having the cron disabled prevented the creation of the triggers entirely
+    env.run_crons('runbot_merge.staging_cron')
 
     # check that st.pr_ids are the PRs we expect
     st = env['runbot_merge.stagings'].search([])
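A note on the cron lookup above: `ir.model.data.check_object_reference('runbot_merge', 'staging_cron')` resolves the XML id to a `(model, res_id)` pair through an ordinary model method, which works over the RPC proxy the test `env` wraps. In-process code could get the same record more directly (equivalent under that assumption):

    # direct resolution of the same cron record
    staging_cron = env.ref('runbot_merge.staging_cron')
    staging_cron.active = False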