mirror of https://github.com/odoo/runbot.git
synced 2025-03-15 15:35:46 +07:00
[FIX] runbot_merge: leftover direct setting of PR state
Setting the PR state directly really doesn't work: the write doesn't correctly save (and can get overwritten by any dependency of the compute, of which there are many). This caused putting odoo/odoo#165777 in error to fail, leading to it being re-staged (and failing) repeatedly, and the PR being spammed with comments.

- create a more formal helper for preventing direct writes to computed fields which have no actual inverse
- replace direct state setting by setting the corresponding dependency, e.g. `error` to put a PR in error and `skipchecks` to force a PR to ready
- add a `skipchecks` inverse to the PR so it can also set itself as reviewed, which is convenient; might be worth also adding similar handling to `Batch.write`
This commit is contained in:
parent e320de0439
commit 60c4b5141d
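For context, a short sketch of the caller-side pattern this change moves to (illustrative only, not part of the commit; `env` and `pr_id` are assumed to come from an Odoo shell or test with the runbot_merge models loaded):

    # Hypothetical usage sketch, not code from the commit itself.
    pr = env['runbot_merge.pull_requests'].browse(pr_id)

    # Before: assigning the stored computed field directly. There is no real
    # inverse, so the value is not persisted and the next recompute of
    # `state` (triggered by any of its many dependencies) overwrites it.
    # pr.state = 'error'

    # After: write the dependency the state is computed from instead.
    pr.error = True        # failed PR -> state becomes 'error'
    pr.skipchecks = True   # skip checks/approval -> PR is forced to 'ready'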
@@ -22,7 +22,7 @@ from odoo.exceptions import AccessError
 from odoo.osv import expression
 from odoo.tools import html_escape, Reverse
 from . import commands
-from .utils import enum
+from .utils import enum, readonly
 
 from .. import github, exceptions, controllers, utils
 
@@ -328,11 +328,11 @@ class PullRequests(models.Model):
 
     closed = fields.Boolean(default=False, tracking=True)
     error = fields.Boolean(string="in error", default=False, tracking=True)
-    skipchecks = fields.Boolean(related='batch_id.skipchecks')
+    skipchecks = fields.Boolean(related='batch_id.skipchecks', inverse='_inverse_skipchecks')
     cancel_staging = fields.Boolean(related='batch_id.cancel_staging')
     merge_date = fields.Datetime(
         related='batch_id.merge_date',
-        inverse=lambda _: 1/0,
+        inverse=readonly,
         tracking=True,
         store=True,
     )
@@ -347,8 +347,13 @@ class PullRequests(models.Model):
         ('merged', 'Merged'),
         ('error', 'Error'),
     ],
-        compute='_compute_state', store=True, default='opened',
-        index=True, tracking=True, column_type=enum(_name, 'state'),
+        compute='_compute_state',
+        inverse=readonly,
+        readonly=True,
+        store=True,
+        index=True,
+        tracking=True,
+        column_type=enum(_name, 'state'),
     )
 
     number = fields.Integer(required=True, index=True, group_operator=None)
@@ -376,7 +381,7 @@ class PullRequests(models.Model):
 
     reviewed_by = fields.Many2one('res.partner', index=True, tracking=True)
     delegates = fields.Many2many('res.partner', help="Delegate reviewers, not intrinsically reviewers but can review this PR")
-    priority = fields.Selection(related="batch_id.priority", inverse=lambda _: 1 / 0)
+    priority = fields.Selection(related="batch_id.priority", inverse=readonly)
 
     overrides = fields.Char(required=True, default='{}', tracking=True)
     statuses = fields.Text(help="Copy of the statuses from the HEAD commit, as a Python literal", default="{}")
@@ -389,12 +394,12 @@ class PullRequests(models.Model):
         ('pending', 'Pending'),
         ('failure', 'Failure'),
         ('success', 'Success'),
-    ], compute='_compute_statuses', store=True, column_type=enum(_name, 'status'))
+    ], compute='_compute_statuses', store=True, inverse=readonly, column_type=enum(_name, 'status'))
     previous_failure = fields.Char(default='{}')
 
     batch_id = fields.Many2one('runbot_merge.batch', index=True)
-    staging_id = fields.Many2one('runbot_merge.stagings', compute='_compute_staging', store=True)
-    staging_ids = fields.Many2many('runbot_merge.stagings', string="Stagings", compute='_compute_stagings', context={"active_test": False})
+    staging_id = fields.Many2one('runbot_merge.stagings', compute='_compute_staging', inverse=readonly, store=True)
+    staging_ids = fields.Many2many('runbot_merge.stagings', string="Stagings", compute='_compute_stagings', inverse=readonly, context={"active_test": False})
 
     @api.depends('batch_id.batch_staging_ids.runbot_merge_stagings_id.active')
     def _compute_staging(self):
@@ -486,6 +491,12 @@ class PullRequests(models.Model):
     def _compute_display_name(self):
         return super()._compute_display_name()
 
+    def _inverse_skipchecks(self):
+        for p in self:
+            p.batch_id.skipchecks = p.skipchecks
+            if p.skipchecks:
+                p.reviewed_by = self.env.user.partner_id
+
     def name_get(self):
         name_template = '%(repo_name)s#%(number)d'
         if self.env.context.get('pr_include_title'):
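With the `_inverse_skipchecks` above, writing `skipchecks` through a PR is enough to force it ready: the flag propagates to the batch and the PR marks itself reviewed by whoever performed the write. A small usage sketch (illustrative only, assuming `pr` is a runbot_merge.pull_requests record in the writing user's environment):

    # illustrative only, not part of the diff
    pr.skipchecks = True
    assert pr.batch_id.skipchecks                     # propagated to the batch
    assert pr.reviewed_by == pr.env.user.partner_id   # writer recorded as reviewer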
@@ -270,7 +270,7 @@ def stage_batches(branch: Branch, batches: Batch, staging_state: StagingState) -
         with contextlib.suppress(Exception):
             reason = json.loads(str(reason))['message'].lower()
 
-        pr.state = 'error'
+        pr.error = True
         env.ref('runbot_merge.pr.merge.failed')._send(
             repository=pr.repository,
             pull_request=pr.number,
@@ -4,3 +4,7 @@ from typing import Tuple
 def enum(model: str, field: str) -> Tuple[str, str]:
     n = f'{model.replace(".", "_")}_{field}_type'
     return n, n
+
+
+def readonly(_):
+    raise TypeError("Field is readonly")
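The `readonly` helper replaces the earlier `inverse=lambda _: 1/0` hack: wired in as a field's `inverse`, it turns direct writes to a computed field into an explicit `TypeError` instead of letting them be silently discarded on the next recompute. A minimal sketch on a hypothetical model (model and field names are illustrative, not from the codebase):

    # Hypothetical model showing how the helper is meant to be wired.
    from odoo import fields, models

    from .utils import readonly


    class ExampleThing(models.Model):
        _name = 'example.thing'
        _description = 'illustration for the readonly inverse'

        total = fields.Integer(compute='_compute_total', store=True, inverse=readonly)

        def _compute_total(self):
            for record in self:
                record.total = 42

    # A direct write such as `record.total = 7` now ends in
    # TypeError("Field is readonly") when the assignment is applied,
    # rather than being lost on the next recompute.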
@@ -2988,7 +2988,7 @@ class TestBatching(object):
         pr01 = self._pr(repo, 'Urgent1', [{'n': 'n'}, {'o': 'o'}], user=config['role_user']['token'], reviewer=None, statuses=[])
         pr01.post_comment('hansen NOW!', config['role_reviewer']['token'])
         p_01 = to_pr(env, pr01)
-        p_01.state = 'error'
+        p_01.error = True
 
         env.run_crons()
         assert not p_01.staging_id, "p_01 should not be picked up as it's failed"
@@ -211,16 +211,22 @@ def test_merge_emptying_commits(env, repo, users, config):
 
     ping = f"@{users['user']} @{users['reviewer']}"
     # check that first / sole commit emptying is caught
-    assert not to_pr(env, pr1).staging_id
+    pr1_id = to_pr(env, pr1)
+    assert not pr1_id.staging_id
     assert pr1.comments[3:] == [
         (users['user'], f"{ping} unable to stage: commit {c1} results in an empty tree when merged, it is likely a duplicate of a merged commit, rebase and remove.")
     ]
+    assert pr1_id.error
+    assert pr1_id.state == 'error'
 
     # check that followup commit emptying is caught
-    assert not to_pr(env, pr2).staging_id
+    pr2_id = to_pr(env, pr2)
+    assert not pr2_id.staging_id
     assert pr2.comments[3:] == [
         (users['user'], f"{ping} unable to stage: commit {c2} results in an empty tree when merged, it is likely a duplicate of a merged commit, rebase and remove.")
     ]
+    assert pr2_id.error
+    assert pr2_id.state == 'error'
 
     # check that emptied squashed pr is caught
     pr3_id = to_pr(env, pr3)
@@ -228,6 +234,17 @@ def test_merge_emptying_commits(env, repo, users, config):
     assert pr3.comments[3:] == [
         (users['user'], f"{ping} unable to stage: results in an empty tree when merged, might be the duplicate of a merged PR.")
     ]
+    assert pr3_id.error
+    assert pr3_id.state == 'error'
+
+    # ensure the PR does not get re-staged since it's the first of the staging
+    # (it's the only one)
+    env.run_crons()
+    assert pr1.comments[3:] == [
+        (users['user'], f"{ping} unable to stage: commit {c1} results in an empty tree when merged, it is likely a duplicate of a merged commit, rebase and remove.")
+    ]
+    assert len(pr2.comments) == 4
+    assert len(pr3.comments) == 4
 
 def test_force_ready(env, repo, config):
     with repo:
@@ -238,7 +255,7 @@ def test_force_ready(env, repo, config):
     env.run_crons()
 
     pr_id = to_pr(env, pr)
-    pr_id.state = 'ready'
+    pr_id.skipchecks = True
 
     assert pr_id.state == 'ready'
     assert pr_id.status == 'pending'