[CHG] runbot_merge: move merge_date to batch, remove active

- `merge_date` should be common to an entire batch, so move it there
- remove `Batch.active`, which should probably have been removed when
  batches were made persistent (it could eventually be re-added as a
  proxy for `merge_date` being set, as sketched below, but for now
  removing it seems a better way to catch mistakes)
- update various sites to use `Batch.merge_date` instead of
  `Batch.active`
Xavier Morel 2024-01-29 14:09:22 +01:00
parent e910b8e857
commit 21b5dd439b
4 changed files with 31 additions and 26 deletions
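
The second bullet above notes that `active` could eventually come back as a
proxy for `merge_date` being set; this commit does not do that. Below is a
minimal sketch of what such a proxy might look like, assuming the standard
Odoo computed-field API (the compute method name is illustrative only, not
part of this commit):

    from odoo import api, fields, models

    class Batch(models.Model):
        _inherit = 'runbot_merge.batch'

        # hypothetical proxy, not in this commit: a batch stays "active"
        # only for as long as it has not been merged
        active = fields.Boolean(compute='_compute_active', store=True)

        @api.depends('merge_date')
        def _compute_active(self):
            for batch in self:
                batch.active = not batch.merge_date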

View File

@@ -258,11 +258,6 @@ class PullRequests(models.Model):
                     token_field='fp_github_token',
                     format_args={'pr': parent, 'child': p},
                 )
-        if vals.get('merge_date'):
-            self.env['forwardport.branch_remover'].create([
-                {'pr_id': p.id}
-                for p in self
-            ])
         # if we change the policy to skip CI, schedule followups on existing FPs
         if vals.get('fw_policy') == 'skipci' and self.state == 'merged':
             self.env['runbot_merge.pull_requests'].search([
@@ -344,8 +339,6 @@ class PullRequests(models.Model):
             })
             resumed = tip
         else:
-            # reactivate batch
-            tip.batch_id.active = True
             resumed = tip._schedule_fp_followup()
         if resumed:
             addendum += f', resuming forward-port stopped at {tip.display_name}'
@@ -982,6 +975,21 @@ stderr:
 }
 )
+
+
+class Batch(models.Model):
+    _inherit = 'runbot_merge.batch'
+
+    def write(self, vals):
+        if vals.get('merge_date'):
+            self.env['forwardport.branch_remover'].create([
+                {'pr_id': p.id}
+                for b in self
+                if not b.merge_date
+                for p in b.prs
+            ])
+        super().write(vals)
+        return True
 # ordering is a bit unintuitive because the lowest sequence (and name)
 # is the last link of the fp chain, reasoning is a bit more natural the

View File

@@ -20,8 +20,7 @@ class Batch(models.Model):
     prs = fields.One2many('runbot_merge.pull_requests', 'batch_id')
-    active = fields.Boolean(default=True)
+    merge_date = fields.Datetime(tracking=True)
     skipchecks = fields.Boolean(
         string="Skips Checks",
         default=False, tracking=True,

View File

@@ -339,10 +339,8 @@ class FreezeWizard(models.Model):
         )
         all_prs = self.release_pr_ids.pr_id | self.bump_pr_ids.pr_id
-        all_prs.write({
-            'merge_date': fields.Datetime.now(),
-            'reviewed_by': self.env.user.partner_id.id,
-        })
+        all_prs.batch_id.merge_date = fields.Datetime.now()
+        all_prs.reviewed_by = self.env.user.partner_id.id
         self.env['runbot_merge.pull_requests.feedback'].create([{
             'repository': pr.repository.id,
             'pull_request': pr.number,

View File

@@ -15,8 +15,7 @@ from typing import Optional, Union, List, Iterator, Tuple
 import sentry_sdk
 import werkzeug
-from odoo import api, fields, models, tools
-from odoo.exceptions import ValidationError
+from odoo import api, fields, models, tools, Command
 from odoo.osv import expression
 from odoo.tools import html_escape
 from . import commands
@@ -329,7 +328,12 @@ class PullRequests(models.Model):
     error = fields.Boolean(string="in error", default=False, tracking=True)
     skipchecks = fields.Boolean(related='batch_id.skipchecks')
     cancel_staging = fields.Boolean(related='batch_id.cancel_staging')
-    merge_date = fields.Datetime(tracking=True)
+    merge_date = fields.Datetime(
+        related='batch_id.merge_date',
+        inverse=lambda _: 1/0,
+        tracking=True,
+        store=True,
+    )
     state = fields.Selection([
         ('opened', 'Opened'),
@@ -939,10 +943,10 @@ class PullRequests(models.Model):
             pr.status = st
     # closed, merged, error should be exclusive, so this should probably be a selection
-    @api.depends("status", "reviewed_by", 'merge_date', "closed", "error")
+    @api.depends("status", "reviewed_by", 'batch_id.merge_date', "closed", "error")
     def _compute_state(self):
         for pr in self:
-            if pr.merge_date:
+            if pr.batch_id.merge_date:
                 pr.state = 'merged'
             elif pr.closed:
                 pr.state = "closed"
@@ -1032,7 +1036,7 @@ class PullRequests(models.Model):
             batch = Batches
         else:
             batch = Batches.search([
-                ('active', '=', True),
+                ('merge_date', '=', False),
                 ('target', '=', target),
                 ('prs.label', '=', label),
             ])
@@ -1735,7 +1739,6 @@ class Stagings(models.Model):
             return False
         _logger.info("Cancelling staging %s: " + reason, self, *args)
-        self.mapped('batch_ids').write({'active': False})
         self.write({
             'active': False,
             'state': 'cancelled',
@@ -1756,7 +1759,6 @@ class Stagings(models.Model):
                 format_args={'pr': pr, 'message': message},
             )
-        self.batch_ids.write({'active': False})
         self.write({
             'active': False,
             'state': 'failure',
@@ -1772,15 +1774,14 @@ class Stagings(models.Model):
         # NB: batches remain attached to their original staging
         sh = self.env['runbot_merge.split'].create({
             'target': self.target.id,
-            'batch_ids': [(4, batch.id, 0) for batch in h],
+            'batch_ids': [Command.link(batch.id) for batch in h],
         })
         st = self.env['runbot_merge.split'].create({
             'target': self.target.id,
-            'batch_ids': [(4, batch.id, 0) for batch in t],
+            'batch_ids': [Command.link(batch.id) for batch in t],
         })
         _logger.info("Split %s to %s (%s) and %s (%s)",
                      self, h, sh, t, st)
-        self.batch_ids.write({'active': False})
         self.write({
             'active': False,
             'state': 'failure',
@@ -1871,7 +1872,7 @@ class Stagings(models.Model):
                     "%s FF successful, marking %s as merged",
                     self, prs
                 )
-                prs.merge_date = fields.Datetime.now()
+                self.batch_ids.merge_date = fields.Datetime.now()
                 pseudobranch = None
                 if self.target == project.branch_ids[:1]:
@@ -1893,7 +1894,6 @@ class Stagings(models.Model):
                         'tags_add': json.dumps([pseudobranch]),
                     })
             finally:
-                self.batch_ids.write({'active': False})
                 self.write({'active': False})
         elif self.state == 'failure' or self.is_timed_out():
             self.try_splitting()