[CHG] runbot_merge: move priority field from PR to batch

Simplifies the `ready_prs` query a bit and allows it to be converted
to an ORM search by moving the priority check out of the query. This
also means the caller no longer needs to post-process the records
list anywhere near as much as before.

`ready_prs` now returns *either* the "alone" batches *or* the regular
ready batches, rather than mixing both into a single sequence. This
requires applying the search filters correctly, so the priority lookup
does not pick up batches which are in error or target other branches.
Xavier Morel committed 2024-02-07 15:05:33 +01:00
commit 83511f45e2 (parent ef6a002ea7)
5 changed files with 67 additions and 32 deletions
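To make the new contract concrete, here is a minimal sketch of how a caller such as `try_staging` consumes `ready_prs` after this change. Only `ready_prs`, `split_ids` and `batch_ids` come from the diff below; the helper function and its name are illustrative, not part of the commit, and it mirrors just one of the `staging_priority` branches.

# Hedged sketch, assuming the ready_prs() defined in the staging module below.
def stageable_batches(branch):
    # ready_prs() now returns a pair instead of a priority-annotated row list:
    #   alone   -- True when at least one ready batch has priority 'alone'
    #   batches -- either just those 'alone' batches, or every ready batch,
    #              already restricted to this branch and to unblocked batches
    alone, batches = ready_prs(for_branch=branch)
    if not alone:
        # splits are ready by definition; drop them so they are not re-staged
        batches -= branch.split_ids.batch_ids
    return batches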

File 1: batch model

@@ -1,6 +1,8 @@
 from __future__ import annotations
 from odoo import models, fields, api
+from odoo.tools import create_index
+from .utils import enum
 class Batch(models.Model):
@@ -36,9 +38,38 @@ class Batch(models.Model):
         default=False, tracking=True,
         help="Cancels current staging on target branch when becoming ready"
     )
+    priority = fields.Selection([
+        ('default', "Default"),
+        ('priority', "Priority"),
+        ('alone', "Alone"),
+    ], default='default', index=True, group_operator=None, required=True,
+        column_type=enum(_name, 'priority'),
+    )
     blocked = fields.Char(store=True, compute="_compute_stageable")
+    def _auto_init(self):
+        for field in self._fields.values():
+            if not isinstance(field, fields.Selection) or field.column_type[0] == 'varchar':
+                continue
+            t = field.column_type[1]
+            self.env.cr.execute("SELECT FROM pg_type WHERE typname = %s", [t])
+            if not self.env.cr.rowcount:
+                self.env.cr.execute(
+                    f"CREATE TYPE {t} AS ENUM %s",
+                    [tuple(s for s, _ in field.selection)]
+                )
+        super()._auto_init()
+        create_index(
+            self.env.cr,
+            "runbot_merge_batch_unblocked_idx",
+            "runbot_merge_batch",
+            ["(blocked is null), priority"],
+        )
     @api.depends(
         "merge_date",
         "prs.error", "prs.draft", "prs.squash", "prs.merge_method",

File 2: pull requests model

@@ -10,7 +10,7 @@ import logging
 import re
 import time
 from functools import reduce
-from typing import Optional, Union, List, Iterator, Tuple
+from typing import Optional, Union, List, Iterator
 import sentry_sdk
 import werkzeug
@@ -19,6 +19,7 @@ from odoo import api, fields, models, tools, Command
 from odoo.osv import expression
 from odoo.tools import html_escape
 from . import commands
+from .utils import enum
 from .. import github, exceptions, controllers, utils
@@ -307,9 +308,6 @@ class Branch(models.Model):
 ACL = collections.namedtuple('ACL', 'is_admin is_reviewer is_author')
-def enum(model: str, field: str) -> Tuple[str, str]:
-    n = f'{model.replace(".", "_")}_{field}_type'
-    return n, n
 class PullRequests(models.Model):
     _name = 'runbot_merge.pull_requests'
     _description = "Pull Request"
@@ -374,13 +372,7 @@ class PullRequests(models.Model):
     reviewed_by = fields.Many2one('res.partner', index=True, tracking=True)
     delegates = fields.Many2many('res.partner', help="Delegate reviewers, not intrinsically reviewers but can review this PR")
-    priority = fields.Selection([
-        ('default', "Default"),
-        ('priority', "Priority"),
-        ('alone', "Alone"),
-    ], default='default', index=True, group_operator=None, required=True,
-        column_type=enum(_name, 'priority'),
-    )
+    priority = fields.Selection(related="batch_id.priority", inverse=lambda _: 1 / 0)
     overrides = fields.Char(required=True, default='{}', tracking=True)
     statuses = fields.Text(help="Copy of the statuses from the HEAD commit, as a Python literal", default="{}")
@@ -755,7 +747,7 @@ class PullRequests(models.Model):
                 })
                 delegates.write({'delegate_reviewer': [(4, self.id, 0)]})
             case commands.Priority() if is_admin:
-                self.priority = str(command)
+                self.batch_id.priority = str(command)
             case commands.SkipChecks() if is_admin:
                 self.batch_id.skipchecks = True
                 self.reviewed_by = author
@@ -1014,7 +1006,7 @@ class PullRequests(models.Model):
                    [tuple(s for s, _ in field.selection)]
                )
-        super(PullRequests, self)._auto_init()
+        super()._auto_init()
        # incorrect index: unique(number, target, repository).
        tools.drop_index(self._cr, 'runbot_merge_unique_pr_per_target', self._table)
        # correct index:

File 3: staging creation

@@ -61,11 +61,9 @@ def try_staging(branch: Branch) -> Optional[Stagings]:
     def log(label: str, batches: Batch) -> None:
         _logger.info(label, ', '.join(batches.mapped('prs.display_name')))
-    rows = ready_prs(for_branch=branch)
-    priority = rows[0][0] if rows else None
-    concat = branch.env['runbot_merge.batch'].concat
-    if priority == 'alone':
-        batches: Batch = concat(*(batch for _, batch in takewhile(lambda r: r[0] == priority, rows)))
+    alone, batches = ready_prs(for_branch=branch)
+    print('alone', alone, 'ready', batches)
+    if alone:
         log("staging high-priority PRs %s", batches)
     elif branch.project_id.staging_priority == 'default':
         if split := branch.split_ids[:1]:
@@ -75,7 +73,6 @@ def try_staging(branch: Branch) -> Optional[Stagings]:
         else:
            # priority, normal; priority = sorted ahead of normal, so always picked
            # first as long as there's room
-            batches = concat(*(batch for _, batch in rows))
             log("staging ready PRs %s (prioritising splits)", batches)
     elif branch.project_id.staging_priority == 'ready':
         # splits are ready by definition, we need to exclude them from the
@@ -83,7 +80,7 @@ def try_staging(branch: Branch) -> Optional[Stagings]:
         # we cycle forever
         # FIXME: once the batches are less shit, store this durably on the
         # batches and filter out when fetching readies (?)
-        batches = concat(*(batch for _, batch in rows)) - branch.split_ids.batch_ids
+        batches -= branch.split_ids.batch_ids
         if batches:
             log("staging ready PRs %s (prioritising ready)", batches)
@@ -97,7 +94,7 @@ def try_staging(branch: Branch) -> Optional[Stagings]:
         # splits are ready by definition, we need to exclude them from the
         # ready rows otherwise ready always wins but we re-stage the splits, so
         # if an error is legit we'll cycle forever
-        batches = concat(*(batch for _, batch in rows)) - branch.split_ids.batch_ids
+        batches -= branch.split_ids.batch_ids
         maxsplit = max(branch.split_ids, key=lambda s: len(s.batch_ids), default=branch.env['runbot_merge.split'])
         _logger.info("largest split = %d, ready = %d", len(maxsplit.batch_ids), len(batches))
@@ -204,20 +201,23 @@ For-Commit-Id: {it.head}
     return st
-def ready_prs(for_branch: Branch) -> List[Tuple[int, Batch]]:
+def ready_prs(for_branch: Branch) -> Tuple[bool, Batch]:
     env = for_branch.env
     env.cr.execute("""
-    SELECT
-        max(pr.priority) as priority,
-        b.id as batch
-    FROM runbot_merge_batch b
-    JOIN runbot_merge_pull_requests pr ON (b.id = pr.batch_id)
-    WHERE b.target = %s AND b.blocked IS NULL
-    GROUP BY b.id
-    ORDER BY max(pr.priority) DESC, min(b.id)
+    SELECT max(priority)
+    FROM runbot_merge_batch
+    WHERE blocked IS NULL AND target = %s
     """, [for_branch.id])
-    browse = env['runbot_merge.batch'].browse
-    return [(p, browse(id)) for p, id in env.cr.fetchall()]
+    alone = env.cr.fetchone()[0] == 'alone'
+    return (
+        alone,
+        env['runbot_merge.batch'].search([
+            ('target', '=', for_branch.id),
+            ('blocked', '=', False),
+            ('priority', '=', 'alone') if alone else (1, '=', 1),
+        ], order="priority DESC, id ASC"),
+    )
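A note on the search domain above: `(1, '=', 1)` is the stock Odoo tautological leaf (it is what `odoo.osv.expression.TRUE_LEAF` is defined as), used so the domain keeps the same shape whether or not the "alone" restriction applies. A small sketch of the same construction; the helper name is made up, only the leaves mirror the diff:

# Hedged illustration of the conditional domain built in ready_prs() above.
from odoo.osv import expression  # expression.TRUE_LEAF == (1, '=', 1)

def ready_batches_domain(branch_id: int, alone: bool) -> list:
    return [
        ('target', '=', branch_id),
        ('blocked', '=', False),
        # either restrict to 'alone' batches, or add an always-true leaf
        ('priority', '=', 'alone') if alone else expression.TRUE_LEAF,
    ]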
def staging_setup( def staging_setup(

File 4: utils (new file)

@@ -0,0 +1,6 @@
+from typing import Tuple
+
+
+def enum(model: str, field: str) -> Tuple[str, str]:
+    n = f'{model.replace(".", "_")}_{field}_type'
+    return n, n

File 5: batching tests

@@ -2820,11 +2820,17 @@ class TestBatching(object):
         assert not staging_1.active
         assert not p_11.staging_id and not p_12.staging_id
         assert p_01.staging_id
+        assert p_11.state == 'ready'
+        assert p_12.state == 'ready'
         # make the staging fail
         with repo:
             repo.post_status('staging.master', 'failure', 'ci/runbot')
         env.run_crons()
+        assert p_01.error
+        assert p_01.batch_id.blocked
+        assert p_01.blocked
         assert p_01.state == 'error'
         assert not p_01.staging_id.active
         staging_2 = ensure_one(sm_all.staging_id)