[IMP] runbot_merge: stop deleting batches & stagings

If we want a dashboard with a history of stagings, maybe not deleting
them would be a good idea.

A replacement for the headless stagings would probably be a good idea:
currently they are created when splitting a failed staging that
contains more than one batch, but their only purpose is to hold the
splits of existing batches until those are deactivated (previously
deleted) and re-staged (new batches & stagings are created at that
point, since e.g. some of the batches may not be mergeable anymore),
and that's a bit weird.
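
A minimal sketch of the resulting lifecycle, for illustration only (not
part of the commit): env and failed_staging are assumed to be an Odoo
environment and an existing failed staging record; the model and field
names are the ones introduced in the diff below.

    # stagings and batches are now deactivated instead of unlink()-ed,
    # so the records stay around for a future history dashboard
    failed_staging.try_splitting()    # >1 batch: creates two headless split stagings
    assert not failed_staging.active  # deactivated (previously: unlink())

    # archived stagings remain queryable; an explicit 'active' term in
    # the domain bypasses Odoo's default active_test filtering
    history = env['runbot_merge.stagings'].search([
        ('target', '=', failed_staging.target.id),
        ('active', '=', False),
    ])
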
Xavier Morel 2018-06-18 12:59:57 +02:00 committed by xmo-odoo
parent 3b0a794c7b
commit 8bc90283f8
4 changed files with 38 additions and 27 deletions

View File

@@ -102,8 +102,8 @@ class Project(models.Model):
 # FIXME: this is the staging head rather than the actual merge commit for the PR
 gh[pr.repository.name].close(pr.number, 'Merged in {}'.format(staging_heads[pr.repository.name]))
 finally:
-staging.batch_ids.unlink()
-staging.unlink()
+staging.batch_ids.write({'active': False})
+staging.write({'active': False})
 elif staging.state == 'failure' or project.is_timed_out(staging):
 staging.try_splitting()
 # else let flow
@@ -111,7 +111,7 @@
 # check for stageable branches/prs
 for branch in project.branch_ids:
 logger.info(
-"Checking %s (%s) for staging: %s, ignore? %s",
+"Checking %s (%s) for staging: %s, skip? %s",
 branch, branch.name,
 branch.active_staging_id,
 bool(branch.active_staging_id)
@@ -125,8 +125,8 @@
 min(pr.priority) as priority,
 array_agg(pr.id) AS match
 FROM runbot_merge_pull_requests pr
+LEFT JOIN runbot_merge_batch batch ON pr.batch_id = batch.id AND batch.active
 WHERE pr.target = %s
-AND pr.batch_id IS NULL
 -- exclude terminal states (so there's no issue when
 -- deleting branches & reusing labels)
 AND pr.state != 'merged'
@@ -141,19 +141,21 @@
 priority = rows[0][0] if rows else -1
 if priority == 0:
 # p=0 take precedence over all else
-batches = [
+batched_prs = [
 PRs.browse(pr_ids)
 for _, pr_ids in takewhile(lambda r: r[0] == priority, rows)
 ]
 elif branch.staging_ids:
 # Splits can generate inactive stagings, restage these first
+# FIXME: these are really weird because they're stagings which never actually get staged, they're just used as splits from failed stagings => maybe add splits for this?
 staging = branch.staging_ids[0]
-logger.info("Found inactive staging %s, reactivating", staging)
-batches = [batch.prs for batch in staging.batch_ids]
-staging.unlink()
+logger.info("Found inactive staging of PRs %s, re-staging", staging.mapped('batch_ids.prs'))
+batched_prs = [batch.prs for batch in staging.batch_ids]
+staging.batch_ids.write({'active': False})
+staging.active = False
 elif rows:
 # p=1 or p=2
-batches = [PRs.browse(pr_ids) for _, pr_ids in takewhile(lambda r: r[0] == priority, rows)]
+batched_prs = [PRs.browse(pr_ids) for _, pr_ids in takewhile(lambda r: r[0] == priority, rows)]
 else:
 continue
@@ -166,7 +168,7 @@
 gh.set_ref('tmp.{}'.format(branch.name), it['head'])
 batch_limit = project.batch_limit
-for batch in batches:
+for batch in batched_prs:
 if len(staged) >= batch_limit:
 break
 staged |= Batch.stage(meta, batch)
@@ -196,7 +198,7 @@
 'state_to': 'staged',
 })
-logger.info("Created staging %s", st)
+logger.info("Created staging %s (%s)", st, staged)
 Repos = self.env['runbot_merge.repository']
 ghs = {}
@@ -268,7 +270,7 @@
 _logger.info("PR %d retargeted to non-managed branch %s, deleting", pr['number'],
 target)
 ignored_targets.update([target])
-existing.unlink()
+existing.write({'active': False})
 else:
 if message != existing.message:
 _logger.info("Updating PR %d ({%s} != {%s})", pr['number'], existing.message, message)
@@ -338,7 +340,7 @@ class Branch(models.Model):
 active_staging_id = fields.One2many(
 'runbot_merge.stagings', 'target',
-domain=[("heads", "!=", False)],
+domain=[("heads", "!=", False), ('active', '=', True)],
 help="Currently running staging for the branch, there should be only one"
 )
 staging_ids = fields.One2many('runbot_merge.stagings', 'target')
@@ -394,7 +396,8 @@ class PullRequests(models.Model):
 statuses = fields.Text(compute='_compute_statuses')
-batch_id = fields.Many2one('runbot_merge.batch')
+batch_id = fields.Many2one('runbot_merge.batch',compute='_compute_active_batch', store=True)
+batch_ids = fields.Many2many('runbot_merge.batch')
 staging_id = fields.Many2one(related='batch_id.staging_id', store=True)
 @api.depends('head')
@@ -405,6 +408,11 @@
 if c and c.statuses:
 s.statuses = pprint.pformat(json.loads(c.statuses))
+@api.depends('batch_ids.active')
+def _compute_active_batch(self):
+for r in self:
+r.batch_id = r.batch_ids.filtered(lambda b: b.active)[:1]
 def _parse_command(self, commandstring):
 m = re.match(r'(\w+)(?:([+-])|=(.*))?', commandstring)
 if not m:
@@ -707,6 +715,7 @@ class Stagings(models.Model):
 ('failure', 'Failure'),
 ('pending', 'Pending'),
 ])
+active = fields.Boolean(default=True)
 staged_at = fields.Datetime(default=fields.Datetime.now)
 restaged = fields.Integer(default=0)
@@ -745,8 +754,8 @@
 return
 _logger.info(reason, *args)
-self.batch_ids.unlink()
-self.unlink()
+self.batch_ids.write({'active': False})
+self.write({'active': False})
 def fail(self, message, prs=None):
 _logger.error("Staging %s failed: %s", self, message)
@@ -756,24 +765,26 @@
 pr.repository.github().comment(
 pr.number, "Staging failed: %s" % message)
-self.batch_ids.unlink()
-self.unlink()
+self.batch_ids.write({'active': False})
+self.write({'active': False})
 def try_splitting(self):
 batches = len(self.batch_ids)
 if batches > 1:
 midpoint = batches // 2
 h, t = self.batch_ids[:midpoint], self.batch_ids[midpoint:]
-self.env['runbot_merge.stagings'].create({
+sh = self.env['runbot_merge.stagings'].create({
 'target': self.target.id,
 'batch_ids': [(4, batch.id, 0) for batch in h],
 })
-self.env['runbot_merge.stagings'].create({
+st = self.env['runbot_merge.stagings'].create({
 'target': self.target.id,
 'batch_ids': [(4, batch.id, 0) for batch in t],
 })
+_logger.info("Split %s to %s (%s) and %s (%s)",
+self, h, sh, t, st)
 # apoptosis
-self.unlink()
+self.active = False
 return True
 # single batch => the staging is an unredeemable failure
@@ -820,7 +831,9 @@ class Batch(models.Model):
 target = fields.Many2one('runbot_merge.branch', required=True)
 staging_id = fields.Many2one('runbot_merge.stagings')
-prs = fields.One2many('runbot_merge.pull_requests', 'batch_id')
+prs = fields.Many2many('runbot_merge.pull_requests')
+active = fields.Boolean(default=True)
 @api.constrains('target', 'prs')
 def _check_prs(self):

View File

@@ -357,13 +357,11 @@ class Repo(object):
 return (200, {})
 def _remove_label(self, _, number, label):
-print('remove_label', number, label)
 try:
 pr = self.issues[int(number)]
 except KeyError:
 return (404, None)
-print(pr, pr.labels)
 try:
 pr.labels.remove(werkzeug.urls.url_unquote(label))
 except KeyError:

View File

@@ -136,7 +136,7 @@ def test_staging_conflict(env, repo):
 env['runbot_merge.project']._check_progress()
 p_2 = env['runbot_merge.pull_requests'].search([
 ('repository.name', '=', repo.name),
-('number', '=', 2)
+('number', '=', pr2.number)
 ])
 assert p_2.state == 'ready', "PR2 should not have been staged since there is a pending staging for master"
 assert pr2.labels == {'seen 🙂', 'CI 🤖', 'r+ 👌'}
@@ -967,7 +967,7 @@ class TestBatching(object):
 env['runbot_merge.project']._check_progress()
 # first staging should be cancelled and PR0 should be staged
 # regardless of CI (or lack thereof)
-assert not staging_1.exists()
+assert not staging_1.active
 assert not p_11.staging_id and not p_12.staging_id
 assert p_01.staging_id

View File

@@ -309,7 +309,7 @@ def test_batching_split(env, repo_a, repo_b):
 env['runbot_merge.project']._check_progress()
-assert not st0.exists()
+assert not st0.active
 sts = env['runbot_merge.stagings'].search([])
 assert len(sts) == 2
 st1, st2 = sts