[REF] runbot_merge: compute staging state

Rather than compute staging state directly from commit statuses, copy
statuses into the staging's `statuses_cache` then compute state based
on that. Also refactor `statuses` and `staging_end` to be computed
based on the statuses cache instead of the commits (or `state`
update).

The goal is to allow non-webhook validation of stagings, for direct
communications between the mergebot and the CI (mostly runbot).
Xavier Morel 2024-05-31 12:33:13 +02:00
parent 68cfeddaed
commit 44084e303c
2 changed files with 39 additions and 27 deletions
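
The pattern underneath: `statuses_cache` is a plain JSON text column, and `state` (plus `staging_end`) becomes a stored computed field over it, so anything that writes the cache — status webhook or direct CI notification — triggers recomputation through the ORM. A minimal sketch of that shape, using a hypothetical `example.staging` model rather than the actual runbot_merge code:

    import json

    from odoo import api, fields, models

    class Staging(models.Model):
        _name = 'example.staging'

        # map of head SHA -> {context: status payload}, written by notifications
        statuses_cache = fields.Text(default='{}', required=True)
        state = fields.Selection(
            [('pending', 'Pending'), ('success', 'Success'), ('failure', 'Failure')],
            default='pending', store=True, compute='_compute_state',
        )

        @api.depends('statuses_cache')
        def _compute_state(self):
            # simplified aggregation; the real compute also walks each
            # repository's required contexts and treats a missing one as pending
            for staging in self:
                states = {
                    status.get('state')
                    for per_sha in json.loads(staging.statuses_cache).values()
                    for status in per_sha.values()
                }
                if states & {'error', 'failure'}:
                    staging.state = 'failure'
                elif 'pending' in states:
                    staging.state = 'pending'
                else:
                    staging.state = 'success'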

View File

@@ -32,6 +32,7 @@ class Project(models.Model):
         ('largest', "Largest of split and ready PRs"),
         ('ready', "Ready PRs over split"),
     ], default="default", required=True)
+    staging_statuses = fields.Boolean(default=True)
     ci_timeout = fields.Integer(
         default=60, required=True, group_operator=None,

View File

@@ -1740,9 +1740,13 @@ class Commit(models.Model):
                         f"statuses changed on {c.sha}"
                     pr._validate(c.statuses)
-                stagings = Stagings.search([('head_ids.sha', '=', c.sha), ('state', '=', 'pending')])
+                stagings = Stagings.search([
+                    ('head_ids.sha', '=', c.sha),
+                    ('state', '=', 'pending'),
+                    ('target.project_id.staging_statuses', '=', True),
+                ])
                 if stagings:
-                    stagings._validate()
+                    stagings._notify(c)
             except Exception:
                 _logger.exception("Failed to apply commit %s (%s)", c, c.sha)
                 self.env.cr.rollback()
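
The search now also requires the target's project to have `staging_statuses` enabled, and matching stagings are handed the commit via the new `_notify` instead of being re-validated wholesale. That lets a project whose CI reports directly to the mergebot opt out of webhook-driven staging validation. A hypothetical opt-out, assuming the `runbot_merge.project` model name and its `name` field:

    # disable status-webhook validation of stagings for one project;
    # its stagings then only progress via direct CI notifications
    project = env['runbot_merge.project'].search([('name', '=', 'some-project')])
    project.staging_statuses = False
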
@@ -1792,11 +1796,11 @@ class Stagings(models.Model):
         ('pending', 'Pending'),
         ('cancelled', "Cancelled"),
         ('ff_failed', "Fast forward failed")
-    ], default='pending', index=True)
+    ], default='pending', index=True, store=True, compute='_compute_state')
     active = fields.Boolean(default=True)
     staged_at = fields.Datetime(default=fields.Datetime.now, index=True)
-    staging_end = fields.Datetime()
+    staging_end = fields.Datetime(store=True, compute='_compute_state')
     staging_duration = fields.Float(compute='_compute_duration')
     timeout_limit = fields.Datetime(store=True, compute='_compute_timeout_limit')
     reason = fields.Text("Reason for final state (if any)")
@@ -1807,23 +1811,13 @@ class Stagings(models.Model):
     commits = fields.One2many('runbot_merge.stagings.commits', 'staging_id')
     statuses = fields.Binary(compute='_compute_statuses')
-    statuses_cache = fields.Text()
+    statuses_cache = fields.Text(default='{}', required=True)
 
     def write(self, vals):
-        if 'state' not in vals:
-            return super().write(vals)
-
-        previously_pending = self.filtered(lambda s: s.state == 'pending')
-        super().write(vals)
-        for staging in previously_pending:
-            if staging.state != 'pending':
-                super(Stagings, staging).write({
-                    'staging_end': fields.Datetime.now(),
-                    'statuses_cache': json.dumps(staging.statuses),
-                })
-        return True
+        # don't allow updating the statuses_cache
+        vals.pop('statuses_cache', None)
+        return super().write(vals)
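
`write()` now silently drops `statuses_cache` from incoming values: the cache is only ever mutated by `_notify()` below, and the old hook that snapshotted `staging_end` and the cache on state change disappears since both are computed. An illustration with made-up values:

    # 'reason' is written normally; the cache key is discarded by the override
    staging.write({
        'reason': 'manual note',
        'statuses_cache': '{"feedbeef": {}}',  # ignored
    })
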
@@ -1851,7 +1845,7 @@ class Stagings(models.Model):
     def _search_batch_ids(self, operator, value):
         return [('staging_batch_ids.runbot_merge_batch_id', operator, value)]
-    @api.depends('heads')
+    @api.depends('heads', 'statuses_cache')
     def _compute_statuses(self):
         """ Fetches statuses associated with the various heads, returned as
         (repo, context, state, url)
@@ -1860,9 +1854,7 @@ class Stagings(models.Model):
         all_heads = self.mapped('head_ids')
         for st in self:
-            if st.statuses_cache:
-                st.statuses = json.loads(st.statuses_cache)
-                continue
+            statuses = json.loads(st.statuses_cache)
             commits = st.head_ids.with_prefetch(all_heads._prefetch_ids)
             st.statuses = [
@@ -1873,7 +1865,7 @@ class Stagings(models.Model):
                     status.get('target_url') or ''
                 )
                 for commit in commits
-                for context, status in json.loads(commit.statuses).items()
+                for context, status in statuses.get(commit.sha, {}).items()
             ]
 
     # only depend on staged_at as it should not get modified, but we might
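
`_compute_statuses` now reads every head's statuses out of the single cached blob instead of decoding each commit row. The cache shape this assumes, keyed by head SHA then status context (illustrative SHA, contexts and payloads, not real data):

    statuses_cache = {
        "0b7eaf5...": {
            "ci/runbot": {"state": "success", "target_url": "https://runbot.example.com/build/1"},
            "legal/cla": {"state": "success", "target_url": ""},
        },
    }
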
@@ -1892,7 +1884,26 @@ class Stagings(models.Model):
         for staging in self:
             staging.pr_ids = staging.batch_ids.prs
 
-    def _validate(self):
+    def _notify(self, c: Commit) -> None:
+        self.env.cr.execute("""
+            UPDATE runbot_merge_stagings
+            SET statuses_cache = CASE
+                WHEN statuses_cache::jsonb->%(sha)s IS NULL
+                    THEN jsonb_insert(statuses_cache::jsonb, ARRAY[%(sha)s], %(statuses)s::jsonb)
+                ELSE statuses_cache::jsonb || jsonb_build_object(%(sha)s, %(statuses)s::jsonb)
+            END::text
+            WHERE id = any(%(ids)s)
+        """, {'sha': c.sha, 'statuses': c.statuses, 'ids': self.ids})
+        self.modified(['statuses_cache'])
+
+    @api.depends(
+        "statuses_cache",
+        "target",
+        "heads.commit_id.sha",
+        "heads.repository_id.status_ids.branch_filter",
+        "heads.repository_id.status_ids.context",
+    )
+    def _compute_state(self):
         for s in self:
             if s.state != 'pending':
                 continue
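
`_notify` upserts a commit's whole status map under its SHA: `jsonb_insert` when the key is new, `||` with `jsonb_build_object` to replace it otherwise. Doing this in SQL keeps concurrent notifications from racing a read-modify-write cycle through the ORM, and `self.modified(['statuses_cache'])` then tells the ORM the column changed behind its back so the dependent computes (`state`, `staging_end`, `statuses`) are invalidated. A pure-Python sketch of the merge semantics only:

    import json

    def merge_into_cache(cache_text: str, sha: str, statuses_text: str) -> str:
        # equivalent of the CASE expression: insert the SHA's status map,
        # or replace it wholesale if already present
        cache = json.loads(cache_text)
        cache[sha] = json.loads(statuses_text)
        return json.dumps(cache)
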
@@ -1902,8 +1913,7 @@ class Stagings(models.Model):
                 (h.commit_id.sha, h.repository_id.status_ids._for_staging(s).mapped('context'))
                 for h in s.heads
             ]
-            # maps commits to their statuses
-            cmap = {c.sha: json.loads(c.statuses) for c in s.head_ids}
+            cmap = json.loads(s.statuses_cache)
 
             update_timeout_limit = False
             st = 'success'
@@ -1920,11 +1930,12 @@ class Stagings(models.Model):
                     else:
                         assert v == 'success'
 
-            vals = {'state': st}
+            s.state = st
+            if s.state != 'pending':
+                s.staging_end = fields.Datetime.now()
             if update_timeout_limit:
-                vals['timeout_limit'] = fields.Datetime.to_string(datetime.datetime.now() + datetime.timedelta(minutes=s.target.project_id.ci_timeout))
-                _logger.debug("%s got pending status, bumping timeout to %s (%s)", self, vals['timeout_limit'], cmap)
-            s.write(vals)
+                s.timeout_limit = fields.Datetime.to_string(datetime.datetime.now() + datetime.timedelta(minutes=s.target.project_id.ci_timeout))
+                _logger.debug("%s got pending status, bumping timeout to %s (%s)", self, s.timeout_limit, cmap)
 
     def action_cancel(self):
         w = self.env['runbot_merge.stagings.cancel'].create({
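
`_compute_state` only ever moves a staging out of `pending` (non-pending records are skipped by the `continue` earlier in the method), stamps `staging_end` at the moment of that transition, and keeps the old timeout behaviour: each pending status pushes `timeout_limit` another `ci_timeout` minutes out, so a staging times out after `ci_timeout` minutes without CI activity rather than `ci_timeout` minutes in total. With made-up numbers:

    import datetime

    ci_timeout = 60  # minutes, from the project configuration
    staged_at = datetime.datetime(2024, 5, 31, 12, 0)    # initial deadline: 13:00
    pending_at = datetime.datetime(2024, 5, 31, 12, 45)  # a pending status arrives
    timeout_limit = pending_at + datetime.timedelta(minutes=ci_timeout)  # now 13:45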