[IMP] runbot_merge: don't create unstaged stagings
Previously, when splitting a staging, we would create two never-staged stagings. In a system where stagings are deleted once they are done with (succeeded or failed) that is not much of a problem, but now that we want to keep inactive stagings around it becomes one: the approach gunks up the stagings table, and the post-split stagings "steal" the original's batches, losing information (the relation between stagings and batches).

Replace these empty stagings with dedicated *split* objects. A batch can belong to both a staging and a split; the split is deleted once a new staging has been created from it.

Eventually we may want batches shared between stagings (so we can track the entire history of a batch), but currently that tracking is only PR-level.
commit 76c4d24bf5
parent 8bc90283f8
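For orientation, here is a minimal, self-contained toy model of the lifecycle the message describes: a failed staging with more than one batch is cut into two split records that share its batches, and a split disappears once a new staging is built from it. This is plain illustrative Python, not the Odoo code changed in the diff below; all names in it are made up for the example.

from dataclasses import dataclass
from typing import List

@dataclass
class Batch:
    prs: List[str]

@dataclass
class Staging:
    batches: List[Batch]
    active: bool = True

@dataclass
class Split:
    batches: List[Batch]

splits: List[Split] = []

def try_splitting(staging: Staging) -> bool:
    """On failure, halve the staging's batches into two pending splits."""
    if len(staging.batches) <= 1:
        return False
    mid = len(staging.batches) // 2
    splits.append(Split(staging.batches[:mid]))
    splits.append(Split(staging.batches[mid:]))
    # the original staging keeps its batches (history is preserved),
    # it merely becomes inactive; the splits reference the same batches
    staging.active = False
    return True

def restage_first_split() -> Staging:
    """The scheduler consumes one split: restage its batches, delete the split."""
    split = splits.pop(0)
    return Staging(split.batches)

failed = Staging([Batch(["pr-a"]), Batch(["pr-b"]), Batch(["pr-c"])])
assert try_splitting(failed)
assert len(splits) == 2 and not failed.active
restaged = restage_first_split()
assert len(splits) == 1 and restaged.active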
@@ -145,14 +145,11 @@ class Project(models.Model):
                         PRs.browse(pr_ids)
                         for _, pr_ids in takewhile(lambda r: r[0] == priority, rows)
                     ]
-                elif branch.staging_ids:
-                    # Splits can generate inactive stagings, restage these first
-                    # FIXME: these are really weird because they're stagings which never actually get staged, they're just used as splits from failed stagings => maybe add splits for this?
-                    staging = branch.staging_ids[0]
-                    logger.info("Found inactive staging of PRs %s, re-staging", staging.mapped('batch_ids.prs'))
-                    batched_prs = [batch.prs for batch in staging.batch_ids]
-                    staging.batch_ids.write({'active': False})
-                    staging.active = False
+                elif branch.split_ids:
+                    split_ids = branch.split_ids[0]
+                    logger.info("Found split of PRs %s, re-staging", split_ids.mapped('batch_ids.prs'))
+                    batched_prs = [batch.prs for batch in split_ids.batch_ids]
+                    split_ids.unlink()
                 elif rows:
                     # p=1 or p=2
                     batched_prs = [PRs.browse(pr_ids) for _, pr_ids in takewhile(lambda r: r[0] == priority, rows)]
@@ -338,12 +335,12 @@ class Branch(models.Model):
     name = fields.Char(required=True)
     project_id = fields.Many2one('runbot_merge.project', required=True)
 
-    active_staging_id = fields.One2many(
-        'runbot_merge.stagings', 'target',
-        domain=[("heads", "!=", False), ('active', '=', True)],
-        help="Currently running staging for the branch, there should be only one"
+    active_staging_id = fields.Many2one(
+        'runbot_merge.stagings', compute='_compute_active_staging', store=True,
+        help="Currently running staging for the branch."
     )
     staging_ids = fields.One2many('runbot_merge.stagings', 'target')
+    split_ids = fields.One2many('runbot_merge.split', 'target')
 
     prs = fields.One2many('runbot_merge.pull_requests', 'target', domain=[
         ('state', '!=', 'closed'),
@@ -357,6 +354,11 @@ class Branch(models.Model):
             self._table, ['name', 'project_id'])
         return res
 
+    @api.depends('staging_ids.active')
+    def _compute_active_staging(self):
+        for b in self:
+            b.active_staging_id = b.staging_ids
+
 class PullRequests(models.Model):
     _name = 'runbot_merge.pull_requests'
     _order = 'number desc'
@@ -720,10 +722,8 @@ class Stagings(models.Model):
     staged_at = fields.Datetime(default=fields.Datetime.now)
     restaged = fields.Integer(default=0)
 
-    # seems simpler than adding yet another indirection through a model and
-    # makes checking for actually staged stagings way easier: just see if
-    # heads is set
-    heads = fields.Char(help="JSON-encoded map of heads, one per repo in the project")
+    # seems simpler than adding yet another indirection through a model
+    heads = fields.Char(required=True, help="JSON-encoded map of heads, one per repo in the project")
 
     def _validate(self):
         Commits = self.env['runbot_merge.commit']
@@ -755,7 +755,7 @@ class Stagings(models.Model):
 
         _logger.info(reason, *args)
         self.batch_ids.write({'active': False})
-        self.write({'active': False})
+        self.active = False
 
     def fail(self, message, prs=None):
         _logger.error("Staging %s failed: %s", self, message)
@@ -766,24 +766,25 @@ class Stagings(models.Model):
                 pr.number, "Staging failed: %s" % message)
 
         self.batch_ids.write({'active': False})
-        self.write({'active': False})
+        self.active = False
 
     def try_splitting(self):
         batches = len(self.batch_ids)
         if batches > 1:
             midpoint = batches // 2
             h, t = self.batch_ids[:midpoint], self.batch_ids[midpoint:]
-            sh = self.env['runbot_merge.stagings'].create({
+            # NB: batches remain attached to their original staging
+            sh = self.env['runbot_merge.split'].create({
                 'target': self.target.id,
                 'batch_ids': [(4, batch.id, 0) for batch in h],
             })
-            st = self.env['runbot_merge.stagings'].create({
+            st = self.env['runbot_merge.split'].create({
                 'target': self.target.id,
                 'batch_ids': [(4, batch.id, 0) for batch in t],
            })
             _logger.info("Split %s to %s (%s) and %s (%s)",
                          self, h, sh, t, st)
-            # apoptosis
+            self.batch_ids.write({'active': False})
             self.active = False
             return True
 
@@ -819,6 +820,12 @@ class Stagings(models.Model):
 
         return False
 
+class Split(models.Model):
+    _name = 'runbot_merge.split'
+
+    target = fields.Many2one('runbot_merge.branch', required=True)
+    batch_ids = fields.One2many('runbot_merge.batch', 'split_id', context={'active_test': False})
+
 class Batch(models.Model):
     """ A batch is a "horizontal" grouping of *codependent* PRs: PRs with
     the same label & target but for different repositories. These are
@@ -830,6 +837,7 @@ class Batch(models.Model):
 
     target = fields.Many2one('runbot_merge.branch', required=True)
     staging_id = fields.Many2one('runbot_merge.stagings')
+    split_id = fields.Many2one('runbot_merge.split')
 
     prs = fields.Many2many('runbot_merge.pull_requests')
 
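As a quick usage illustration of the relations added above (mirroring what the test hunks below check), here is a sketch assuming an Odoo environment `env` with this module installed; the branch name and the printing are purely illustrative.

# hypothetical inspection snippet; model/field names come from the diff above
branch = env['runbot_merge.branch'].search([('name', '=', 'master')], limit=1)
print(branch.active_staging_id)                 # computed from staging_ids.active
for split in branch.split_ids:                  # splits waiting to be restaged
    # while a split exists, its batches point at both their original
    # staging (staging_id) and the split (split_id)
    print(split.target.name, split.mapped('batch_ids.prs'))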
@@ -1050,12 +1050,14 @@ class TestBatching(object):
         pr2 = env['runbot_merge.pull_requests'].search([('number', '=', pr2.number)])
 
         env['runbot_merge.project']._check_progress()
-        # should have split the existing batch into two
-        assert len(env['runbot_merge.stagings'].search([])) == 2
-        assert pr1.staging_id and pr2.staging_id
-        assert pr1.staging_id != pr2.staging_id
-        assert pr1.staging_id.heads
-        assert not pr2.staging_id.heads
+        # should have split the existing batch into two, with one of the
+        # splits having been immediately restaged
+        st = env['runbot_merge.stagings'].search([])
+        assert len(st) == 1
+        assert pr1.staging_id and pr1.staging_id == st
+
+        sp = env['runbot_merge.split'].search([])
+        assert len(sp) == 1
 
         # This is the failing PR!
         h = repo.commit('heads/staging.master').id
@@ -1063,7 +1065,8 @@ class TestBatching(object):
         repo.post_status(h, 'success', 'legal/cla')
         env['runbot_merge.project']._check_progress()
         assert pr1.state == 'error'
-        assert pr2.staging_id.heads
+
+        assert pr2.staging_id
 
         h = repo.commit('heads/staging.master').id
         repo.post_status(h, 'success', 'ci/runbot')
@@ -302,7 +302,7 @@ def test_batching_split(env, repo_a, repo_b):
     assert len(st0.batch_ids) == 5
     assert len(st0.mapped('batch_ids.prs')) == 8
 
-    # mark b.staging as failed -> should create two new stagings with (0, 1)
+    # mark b.staging as failed -> should create two splits with (0, 1)
     # and (2, 3, 4) and stage the first one
     repo_b.post_status('heads/staging.master', 'success', 'legal/cla')
    repo_b.post_status('heads/staging.master', 'failure', 'ci/runbot')
@@ -310,21 +310,21 @@ def test_batching_split(env, repo_a, repo_b):
     env['runbot_merge.project']._check_progress()
 
     assert not st0.active
-    sts = env['runbot_merge.stagings'].search([])
-    assert len(sts) == 2
-    st1, st2 = sts
-    # a bit oddly st1 is probably the (2,3,4) one: the split staging for
-    # (0, 1) has been "exploded" and a new staging was created for it
-    assert not st1.heads
-    assert len(st1.batch_ids) == 3
-    assert st1.mapped('batch_ids.prs') == \
-        prs[2][0] | prs[2][1] | prs[3][0] | prs[3][1] | prs[4][0]
 
-    assert st2.heads
-    assert len(st2.batch_ids) == 2
-    assert st2.mapped('batch_ids.prs') == \
+    # at this point we have a re-staged split and an unstaged split
+    st = env['runbot_merge.stagings'].search([])
+    sp = env['runbot_merge.split'].search([])
+    assert st
+    assert sp
+
+    assert len(st.batch_ids) == 2
+    assert st.mapped('batch_ids.prs') == \
         prs[0][0] | prs[0][1] | prs[1][1]
 
+    assert len(sp.batch_ids) == 3
+    assert sp.mapped('batch_ids.prs') == \
+        prs[2][0] | prs[2][1] | prs[3][0] | prs[3][1] | prs[4][0]
+
 def test_urgent(env, repo_a, repo_b):
     """ Either PR of a co-dependent pair being p=0 leads to the entire pair
     being prioritized