# -*- coding: utf-8 -*-
from datetime import datetime, timedelta

import pytest

from utils import seen, Commit, to_pr, make_basic, prevent_unstaging


def test_no_token(env, config, make_repo):
    """ if there's no token on the repo, nothing should break though should
    log
    """
    # create project configured with remotes on the repo but no token
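    # (assumption: make_basic sets up a repository with the branch sequence
    # a -> b -> c and registers it on a mergebot project; fp_token=False
    # leaves the forward-port token unset while fp_remote=True still
    # configures the forward-port remote)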
    prod, _ = make_basic(env, config, make_repo, fp_token=False, fp_remote=True)

    with prod:
        prod.make_commits(
            'a', Commit('c0', tree={'a': '0'}), ref='heads/abranch'
        )
        pr = prod.make_pr(target='a', head='abranch')
        prod.post_status(pr.head, 'success', 'legal/cla')
        prod.post_status(pr.head, 'success', 'ci/runbot')
        pr.post_comment('hansen r+', config['role_reviewer']['token'])
    env.run_crons()

    with prod:
        prod.post_status('staging.a', 'success', 'legal/cla')
        prod.post_status('staging.a', 'success', 'ci/runbot')
    # wanted to use capfd, however it's not compatible with the subprocess
    # being created beforehand and server() depending on capfd() would remove
    # all its output from the normal pytest capture (dumped on test failure)
    #
    # so I'd really have to hand-roll the entire thing by having server()
    # pipe stdout/stderr to temp files, yield those temp files, and have the
    # tests mess around with reading those files, and finally have the server
    # dump the file contents back to the test runner's stdout/stderr on
    # fixture teardown...
    env.run_crons()
    assert len(env['runbot_merge.pull_requests'].search([], order='number')) == 1, \
        "should not have created forward port"


def test_remove_token(env, config, make_repo):
    """ If the forward-port token is removed from the project, nothing should
    break, but no forward port should be created.
    """
    prod, _ = make_basic(env, config, make_repo)
    env['runbot_merge.project'].search([]).fp_github_token = False

    with prod:
        prod.make_commits(
            'a', Commit('c0', tree={'a': '0'}), ref='heads/abranch'
        )
        pr = prod.make_pr(target='a', head='abranch')
        prod.post_status(pr.head, 'success', 'legal/cla')
        prod.post_status(pr.head, 'success', 'ci/runbot')
        pr.post_comment('hansen r+', config['role_reviewer']['token'])
    env.run_crons()

    with prod:
        prod.post_status('staging.a', 'success', 'legal/cla')
        prod.post_status('staging.a', 'success', 'ci/runbot')
    env.run_crons()
    assert len(env['runbot_merge.pull_requests'].search([], order='number')) == 1, \
        "should not have created forward port"


def test_no_target(env, config, make_repo):
    """ If the repository has no forward-port remote configured, nothing
    should break, but no forward port should be created.
    """
    prod, _ = make_basic(env, config, make_repo, fp_remote=False)

    with prod:
        prod.make_commits(
            'a', Commit('c0', tree={'a': '0'}), ref='heads/abranch'
        )
        pr = prod.make_pr(target='a', head='abranch')
        prod.post_status(pr.head, 'success', 'legal/cla')
        prod.post_status(pr.head, 'success', 'ci/runbot')
        pr.post_comment('hansen r+', config['role_reviewer']['token'])
    env.run_crons()

    with prod:
        prod.post_status('staging.a', 'success', 'legal/cla')
        prod.post_status('staging.a', 'success', 'ci/runbot')
    env.run_crons()
    assert len(env['runbot_merge.pull_requests'].search([], order='number')) == 1, \
        "should not have created forward port"


def test_failed_staging(env, config, make_repo):
    """ Check that a forward port whose staging fails can be recovered: a new
    status on its head plus a `retry` command should get it re-staged.
    """
    prod, _ = make_basic(env, config, make_repo)

    reviewer = config['role_reviewer']['token']
    with prod:
        prod.make_commits('a', Commit('c', tree={'a': '0'}), ref='heads/abranch')
        pr1 = prod.make_pr(target='a', head='abranch')
        prod.post_status(pr1.head, 'success', 'legal/cla')
        prod.post_status(pr1.head, 'success', 'ci/runbot')
        pr1.post_comment('hansen r+', reviewer)
    env.run_crons()

    with prod:
        prod.post_status('staging.a', 'success', 'legal/cla')
        prod.post_status('staging.a', 'success', 'ci/runbot')
    env.run_crons()

    pr1_id, pr2_id = env['runbot_merge.pull_requests'].search([], order='number')
    assert pr2_id.parent_id == pr2_id.source_id == pr1_id
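    # (the first forward port points at the merged PR both as its immediate
    # parent and as the source of the whole chain)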
    with prod:
        prod.post_status(pr2_id.head, 'success', 'legal/cla')
        prod.post_status(pr2_id.head, 'success', 'ci/runbot')
    env.run_crons()

    pr1_id, pr2_id, pr3_id = env['runbot_merge.pull_requests'].search([], order='number')
    pr3 = prod.get_pr(pr3_id.number)
    with prod:
        prod.post_status(pr3_id.head, 'success', 'legal/cla')
        prod.post_status(pr3_id.head, 'success', 'ci/runbot')
        pr3.post_comment('hansen r+', reviewer)
    env.run_crons()

    prod.commit('staging.c')

    with prod:
        prod.post_status('staging.b', 'success', 'legal/cla')
        prod.post_status('staging.b', 'success', 'ci/runbot')
        prod.post_status('staging.c', 'failure', 'ci/runbot')
    env.run_crons()

    pr3_head = env['runbot_merge.commit'].search([('sha', '=', pr3_id.head)])
    assert pr3_head

    # send a new status to the PR, as if somebody had rebuilt it or something
    with prod:
        pr3.post_comment('hansen retry', reviewer)
        prod.post_status(pr3_id.head, 'success', 'foo/bar')
        prod.post_status(pr3_id.head, 'success', 'legal/cla')
    assert pr3_head.to_check, "check that the commit was updated as to process"
    env.run_crons()
    assert not pr3_head.to_check, "check that the commit was processed"
    assert pr3_id.state == 'ready'
    assert pr3_id.staging_id


def test_fw_retry(env, config, make_repo, users):
    """ Check the behaviour of a forward port whose staging failed: `r+` by
    itself should only warn that the PR is already reviewed, while `retry`
    should allow it to be staged again.
    """
    prod, _ = make_basic(env, config, make_repo, statuses='default')
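    # (assumption: statuses='default' makes the fixture require a single
    # status context named 'default', which is why post_status is called
    # without an explicit context below)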
    other_token = config['role_other']['token']
    fork = prod.fork(token=other_token)

    with prod, fork:
        fork.make_commits('a', Commit('c', tree={'a': '0'}), ref='heads/abranch')
        pr1 = prod.make_pr(
            title="whatever",
            target='a',
            head=f'{fork.owner}:abranch',
            token=other_token,
        )
        prod.post_status(pr1.head, 'success')
        pr1.post_comment('hansen r+', config['role_reviewer']['token'])
    env.run_crons()

    other_partner = env['res.partner'].search([('github_login', '=', users['other'])])
    assert len(other_partner) == 1
    other_partner.email = "foo@example.com"

    with prod:
        prod.post_status('staging.a', 'success')
    env.run_crons()

    _pr1_id, pr2_id = env['runbot_merge.pull_requests'].search([], order='number')
    pr2 = prod.get_pr(pr2_id.number)
    with prod:
        prod.post_status(pr2_id.head, 'success')
        pr2.post_comment('hansen r+', other_token)
    env.run_crons()
    assert not pr2_id.blocked

    with prod:
        prod.post_status('staging.b', 'failure')
    env.run_crons()
    assert pr2_id.error

    with prod:
        pr2.post_comment('hansen r+', other_token)
    env.run_crons()
    assert pr2_id.state == 'error'
    with prod:
        pr2.post_comment('hansen retry', other_token)
    env.run_crons()
    assert pr2_id.state == 'ready'

    assert pr2.comments == [
        seen(env, pr2, users),
        (users['user'], "This PR targets b and is part of the forward-port chain. Further PRs will be created up to c.\n\nMore info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port\n"),
        (users['other'], 'hansen r+'),
        (users['user'], "@{other} @{reviewer} staging failed: default".format_map(users)),
        (users['other'], 'hansen r+'),
        (users['user'], "This PR is already reviewed, it's in error, you might want to `retry` it instead (if you have already confirmed the error is not legitimate)."),
        (users['other'], 'hansen retry'),
    ]


class TestNotAllBranches:
    """ Check that forward-ports don't behave completely insanely when not all
    branches are supported on all repositories.

    repo A branches a -> b -> c
    a0 -> a1 -> a2       branch a
          `-> a11 -> a22 branch b
              `-> a111   branch c
    repo B branches a -> c
    b0 -> b1 -> b2       branch a
          |
          `-> b000       branch c
    """
    @pytest.fixture
    def repos(self, env, config, make_repo, setreviewers):
        a = make_repo('A')
        with a:
            _, a_, _ = a.make_commits(
                None,
                Commit('a0', tree={'a': '0'}),
                Commit('a1', tree={'a': '1'}),
                Commit('a2', tree={'a': '2'}),
                ref='heads/a'
            )
            b_, _ = a.make_commits(
                a_,
                Commit('a11', tree={'b': '11'}),
                Commit('a22', tree={'b': '22'}),
                ref='heads/b'
            )
            a.make_commits(b_, Commit('a111', tree={'c': '111'}), ref='heads/c')
            a_dev = a.fork()
        b = make_repo('B')
        with b:
            _, _a, _ = b.make_commits(
                None,
                Commit('b0', tree={'a': 'x'}),
                Commit('b1', tree={'a': 'y'}),
                Commit('b2', tree={'a': 'z'}),
                ref='heads/a'
            )
            b.make_commits(_a, Commit('b000', tree={'c': 'x'}), ref='heads/c')
            b_dev = b.fork()
        project = env['runbot_merge.project'].create({
            'name': 'proj',
            'github_token': config['github']['token'],
            'github_prefix': 'hansen',
            'github_name': config['github']['name'],
            'github_email': "foo@example.org",
            'fp_github_token': config['github']['token'],
            'fp_github_name': 'herbert',
            'branch_ids': [
                (0, 0, {'name': 'a', 'sequence': 2}),
                (0, 0, {'name': 'b', 'sequence': 1}),
                (0, 0, {'name': 'c', 'sequence': 0}),
            ]
        })
        repo_a = env['runbot_merge.repository'].create({
            'project_id': project.id,
            'name': a.name,
            'required_statuses': 'ci/runbot',
            'fp_remote_target': a_dev.name,
        })
        repo_b = env['runbot_merge.repository'].create({
            'project_id': project.id,
            'name': b.name,
            'required_statuses': 'ci/runbot',
            'fp_remote_target': b_dev.name,
            'branch_filter': '[("name", "in", ["a", "c"])]',
        })
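        # repo B's branch_filter above excludes branch 'b', so forward ports
        # in that repository are expected to jump from 'a' straight to 'c'
        # (matching the class docstring)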
        setreviewers(repo_a, repo_b)
        env['runbot_merge.events_sources'].create([{'repository': a.name}, {'repository': b.name}])
        return project, a, a_dev, b, b_dev

    def test_single_first(self, env, repos, config):
        """ A merge in A.a should be forward-ported to A.b and A.c
        """
        project, a, a_dev, b, _ = repos
        with a, a_dev:
            [c] = a_dev.make_commits('a', Commit('pr', tree={'pr': '1'}), ref='heads/change')
            pr = a.make_pr(target='a', title="a pr", head=a_dev.owner + ':change')
            a.post_status(c, 'success', 'ci/runbot')
            pr.post_comment('hansen r+', config['role_reviewer']['token'])
        p = to_pr(env, pr)
        env.run_crons()
        assert p.staging_id

        with a, b:
            for repo in a, b:
                repo.post_status('staging.a', 'success', 'ci/runbot')
        env.run_crons()
        a_head = a.commit('a')
        assert a_head.message.startswith('pr\n\n')
        assert a.read_tree(a_head) == {'a': '2', 'pr': '1'}

        pr0, pr1 = env['runbot_merge.pull_requests'].search([], order='number')
        with a:
            a.post_status(pr1.head, 'success', 'ci/runbot')
        env.run_crons()

        pr0, pr1, pr2 = env['runbot_merge.pull_requests'].search([], order='number')
        with a:
            a.post_status(pr2.head, 'success', 'ci/runbot')
            a.get_pr(pr2.number).post_comment(
                'hansen r+',
                config['role_reviewer']['token'])
        env.run_crons()
        assert pr1.staging_id
        assert pr2.staging_id
        with a, b:
            a.post_status('staging.b', 'success', 'ci/runbot')
            a.post_status('staging.c', 'success', 'ci/runbot')
            b.post_status('staging.c', 'success', 'ci/runbot')
        env.run_crons()
        assert pr0.state == 'merged'
        assert pr1.state == 'merged'
        assert pr2.state == 'merged'
        assert a.read_tree(a.commit('b')) == {'a': '1', 'b': '22', 'pr': '1'}
        assert a.read_tree(a.commit('c')) == {'a': '1', 'b': '11', 'c': '111', 'pr': '1'}

    def test_single_second(self, env, repos, config):
        """ A merge in B.a should "skip ahead" to B.c
        """
        project, a, _, b, b_dev = repos
        with b, b_dev:
            [c] = b_dev.make_commits('a', Commit('pr', tree={'pr': '1'}), ref='heads/change')
            pr = b.make_pr(target='a', title="a pr", head=b_dev.owner + ':change')
            b.post_status(c, 'success', 'ci/runbot')
            pr.post_comment('hansen r+', config['role_reviewer']['token'])
        env.run_crons()

        with a, b:
            a.post_status('staging.a', 'success', 'ci/runbot')
            b.post_status('staging.a', 'success', 'ci/runbot')
        env.run_crons()
        assert b.read_tree(b.commit('a')) == {'a': 'z', 'pr': '1'}

        pr0, pr1 = env['runbot_merge.pull_requests'].search([], order='number')
        with b:
            b.post_status(pr1.head, 'success', 'ci/runbot')
            b.get_pr(pr1.number).post_comment(
                'hansen r+',
                config['role_reviewer']['token'])
        env.run_crons()
        with a, b:
            a.post_status('staging.c', 'success', 'ci/runbot')
            b.post_status('staging.c', 'success', 'ci/runbot')
        env.run_crons()
        assert pr0.state == 'merged'
        assert pr1.state == 'merged'
        assert b.read_tree(b.commit('c')) == {'a': 'y', 'c': 'x', 'pr': '1'}

    def test_both_first(self, env, repos, config, users):
        """ A merge in A.a, B.a should... not be forward-ported at all?
        """
        project, a, a_dev, b, b_dev = repos
        with a, a_dev:
            [c_a] = a_dev.make_commits('a', Commit('pr a', tree={'pr': 'a'}), ref='heads/change')
            pr_a = a.make_pr(target='a', title='a pr', head=a_dev.owner + ':change')
            a.post_status(c_a, 'success', 'ci/runbot')
            pr_a.post_comment('hansen r+', config['role_reviewer']['token'])
        with b, b_dev:
            [c_b] = b_dev.make_commits('a', Commit('pr b', tree={'pr': 'b'}), ref='heads/change')
            pr_b = b.make_pr(target='a', title='b pr', head=b_dev.owner + ':change')
            b.post_status(c_b, 'success', 'ci/runbot')
            pr_b.post_comment('hansen r+', config['role_reviewer']['token'])
        env.run_crons()

        with a, b:
            for repo in a, b:
                repo.post_status('staging.a', 'success', 'ci/runbot')
        env.run_crons()
        pr_a_id = to_pr(env, pr_a)
        pr_b_id = to_pr(env, pr_b)
        assert pr_a_id.state == pr_b_id.state == 'merged'
        assert env['runbot_merge.pull_requests'].search([]) == pr_a_id | pr_b_id
        # should have refused to create a forward port because the PRs have
        # different next target
        assert pr_a.comments == [
            (users['reviewer'], 'hansen r+'),
            seen(env, pr_a, users),
            (users['user'], "@%s @%s this pull request can not be forward-ported:"
                            " next branch is 'b' but linked pull request %s"
                            " has a next branch 'c'." % (
                users['user'], users['reviewer'], pr_b_id.display_name,
            )),
        ]
        assert pr_b.comments == [
            (users['reviewer'], 'hansen r+'),
            seen(env, pr_b, users),
            (users['user'], "@%s @%s this pull request can not be forward-ported:"
                            " next branch is 'c' but linked pull request %s"
                            " has a next branch 'b'." % (
                users['user'], users['reviewer'], pr_a_id.display_name,
            )),
        ]


def test_new_intermediate_branch(env, config, make_repo):
    """ In the case of a freeze / release a new intermediate branch appears in
    the sequence. New or ongoing forward ports should pick it up just fine (as
    the "next target" is decided when a PR is ported forward) however this is
    an issue for existing yet-to-be-merged sequences e.g. given the branches
    1.0, 2.0 and master, if a branch 3.0 is forked off from master and inserted
    before it, we need to create a new *intermediate* forward port PR
    """
    def validate(repo, commit):
        repo.post_status(commit, 'success', 'ci/runbot')
        repo.post_status(commit, 'success', 'legal/cla')

    prod, _ = make_basic(env, config, make_repo)
    prod2, _ = make_basic(env, config, make_repo)
    project = env['runbot_merge.project'].search([])
    assert len(project.repo_ids) == 2

    original_c_tree = prod.read_tree(prod.commit('c'))
    prs = []
    with prod, prod2:
        for i in ['0', '1']:
            prod.make_commits('a', Commit(i, tree={i: i}), ref='heads/branch%s' % i)
            pr = prod.make_pr(target='a', head='branch%s' % i)
            prs.append(pr)
            validate(prod, pr.head)
            pr.post_comment('hansen r+', config['role_reviewer']['token'])

        # also add a PR targeting b forward-ported to c, in order to check
        # for an insertion right after the source, as well as have linked PRs in
        # two different repos
        prod.make_commits('b', Commit('x', tree={'x': 'x'}), ref='heads/branchx')
        prod2.make_commits('b', Commit('x2', tree={'x': 'x2'}), ref='heads/branchx')
        prx = prod.make_pr(target='b', head='branchx')
prx2 = prod2 . make_pr ( target = ' b ' , head = ' branchx ' )
validate ( prod , prx . head )
validate ( prod2 , prx2 . head )
prx . post_comment ( ' hansen r+ ' , config [ ' role_reviewer ' ] [ ' token ' ] )
prx2 . post_comment ( ' hansen r+ ' , config [ ' role_reviewer ' ] [ ' token ' ] )
env . run_crons ( )
with prod , prod2 :
for r in [ prod , prod2 ] :
validate ( r , ' staging.a ' )
validate ( r , ' staging.b ' )
env . run_crons ( )
# should have merged pr1, pr2 and prx and created their forward ports, now
# validate pr0's FP so the c-targeted FP is created
PRs = env [ ' runbot_merge.pull_requests ' ]
pr0_id = to_pr ( env , prs [ 0 ] )
pr0_fp_id = PRs . search ( [
( ' source_id ' , ' = ' , pr0_id . id ) ,
] )
assert pr0_fp_id
assert pr0_fp_id . target . name == ' b '
with prod :
validate ( prod , pr0_fp_id . head )
env . run_crons ( )
assert pr0_fp_id . state == ' validated '
original0 = PRs . search ( [ ( ' parent_id ' , ' = ' , pr0_fp_id . id ) ] )
assert original0 , " Could not find FP of PR0 to C "
assert original0 . target . name == ' c '
pr1_id = to_pr ( env , prs [ 1 ] )
pr1_fp_id = PRs . search ( [ ( ' source_id ' , ' = ' , pr1_id . id ) ] )
assert pr1_fp_id . target . name == ' b '
# also check prx's fp
prx_id = to_pr ( env , prx )
prx2_id = to_pr ( env , prx2 )
assert prx_id . label == prx2_id . label
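# NOTE: prx / prx2 target 'b', so their first forward ports go straight to
# 'c'; the branch inserted between 'b' and 'c' further down must splice
# bridge PRs into these existing chains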
prx_fp_id = PRs . search ( [ ( ' source_id ' , ' = ' , prx_id . id ) ] )
assert prx_fp_id
assert prx_fp_id . target . name == ' c '
prx2_fp_id = PRs . search ( [ ( ' source_id ' , ' = ' , prx2_id . id ) ] )
assert prx2_fp_id
assert prx2_fp_id . target . name == ' c '
assert prx_fp_id . label == prx2_fp_id . label , \
" ensure labels of PRs of same batch are the same "
# NOTE: the branch must be created on git(hub) first, probably
# create new branch forked from the "current master" (c)
c = prod . commit ( ' c ' ) . id
with prod :
prod . make_ref ( ' heads/new ' , c )
c2 = prod2 . commit ( ' c ' ) . id
with prod2 :
prod2 . make_ref ( ' heads/new ' , c2 )
currents = { branch . name : branch . id for branch in project . branch_ids }
# insert a branch between "b" and "c"
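# NOTE: lower sequence = more recent branch: after this write the ordering
# (oldest to newest) is a (3) -> b (2, deactivated) -> c (0), leaving
# sequence 1 free for the 'new' branch added in the next write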
project . write ( {
' branch_ids ' : [
( 1 , currents [ ' a ' ] , { ' sequence ' : 3 } ) ,
( 1 , currents [ ' b ' ] , { ' sequence ' : 2 , ' active ' : False } ) ,
( 1 , currents [ ' c ' ] , { ' sequence ' : 0 } )
]
} )
env . run_crons ( )
project . write ( {
' branch_ids ' : [
( 0 , False , { ' name ' : ' new ' , ' sequence ' : 1 } ) ,
]
} )
env . run_crons ( )
assert pr0_fp_id . state == ' validated '
# created an intermediate PR for 0 and x
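# (desc0 = every forward port of pr0; removing the known b- and c-targeted
# ones should leave exactly the PR created for the inserted branch)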
desc0 = PRs . search ( [ ( ' source_id ' , ' = ' , pr0_id . id ) ] )
new0 = desc0 - pr0_fp_id - original0
assert len ( new0 ) == 1
assert new0 . parent_id == pr0_fp_id
assert new0 . target . name == ' new '
assert original0 . parent_id == new0
descx = PRs . search ( [ ( ' source_id ' , ' = ' , prx_id . id ) ] )
newx = descx - prx_fp_id
assert len ( newx ) == 1
assert newx . parent_id == prx_id
assert newx . target . name == ' new '
assert prx_fp_id . parent_id == newx
descx2 = PRs . search ( [ ( ' source_id ' , ' = ' , prx2_id . id ) ] )
newx2 = descx2 - prx2_fp_id
assert len ( newx2 ) == 1
assert newx2 . parent_id == prx2_id
assert newx2 . target . name == ' new '
assert prx2_fp_id . parent_id == newx2
assert newx . label == newx2 . label
# created followups for 1
# earliest followup is followup from deactivating a branch, creates fp in
# n+1 = c (from b), then inserting a new branch between b and c should
# create a bridge forward port
_ , pr1_c , pr1_new = PRs . search ( [ ( ' source_id ' , ' = ' , pr1_id . id ) ] , order = ' number ' )
assert pr1_c . target . name == ' c '
assert pr1_new . target . name == ' new '
assert pr1_c . parent_id == pr1_new
assert pr1_new . parent_id == pr1_fp_id
# ci on pr1/pr2 fp to b
sources = [ to_pr ( env , pr ) . id for pr in prs ]
sources . append ( prx_id . id )
sources . append ( prx2_id . id )
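# helper: map a mergebot PR record back to the repo fixture it belongs to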
def get_repo ( pr ) :
if pr . repository . name == prod . name :
return prod
return prod2
# CI all the forward port PRs (shouldn't hurt to re-ci the forward port of
# prs[0] to b aka pr0_fp_id
fps = PRs . search ( [ ( ' source_id ' , ' in ' , sources ) , ( ' target.name ' , ' in ' , [ ' new ' , ' c ' ] ) ] )
with prod , prod2 :
for fp in fps :
validate ( get_repo ( fp ) , fp . head )
env . run_crons ( )
# now fps should be the last PR of each sequence, and thus r+-able (approving
# the last forward port also approves the preceding PRs of the sequence)
with prod , prod2 :
for pr in fps . filtered ( lambda p : p . target . name == ' c ' ) :
get_repo ( pr ) . get_pr ( pr . number ) . post_comment (
' hansen r+ ' ,
config [ ' role_reviewer ' ] [ ' token ' ] )
assert all ( p . state == ' merged ' for p in PRs . browse ( sources ) ) , \
" all sources should be merged "
assert all ( p . state == ' ready ' for p in PRs . search ( [ ( ' source_id ' , ' != ' , False ) , ( ' target.name ' , ' != ' , ' b ' ) ] ) ) , \
" All PRs except sources and prs on disabled branch should be ready "
env . run_crons ( )
assert len ( env [ ' runbot_merge.stagings ' ] . search ( [ ] ) ) == 2 , \
" enabled branches should have been staged "
with prod , prod2 :
for target in [ ' new ' , ' c ' ] :
validate ( prod , f ' staging. { target } ' )
validate ( prod2 , f ' staging. { target } ' )
env . run_crons ( )
assert all ( p . state == ' merged ' for p in PRs . search ( [ ( ' target.name ' , ' != ' , ' b ' ) ] ) ) , \
" All PRs except disabled branch should be merged now "
assert prod . read_tree ( prod . commit ( ' c ' ) ) == {
* * original_c_tree ,
' 0 ' : ' 0 ' , ' 1 ' : ' 1 ' , # updates from PRs
' x ' : ' x ' ,
} , " check that C got all the updates "
assert prod . read_tree ( prod . commit ( ' new ' ) ) == {
* * original_c_tree ,
' 0 ' : ' 0 ' , ' 1 ' : ' 1 ' , # updates from PRs
' x ' : ' x ' ,
} , " check that new got all the updates (should be in the same state as c really) "
def test_author_can_close_via_fwbot ( env , config , make_repo ) :
prod , _ = make_basic ( env , config , make_repo )
other_user = config [ ' role_other ' ]
other_token = other_user [ ' token ' ]
other = prod . fork ( token = other_token )
with prod , other :
[ c ] = other . make_commits ( ' a ' , Commit ( ' c ' , tree = { ' 0 ' : ' 0 ' } ) , ref = ' heads/change ' )
pr = prod . make_pr (
target = ' a ' , title = ' my change ' ,
head = other_user [ ' user ' ] + ' :change ' ,
token = other_token
)
# should be able to close and open own PR
pr . close ( other_token )
pr . open ( other_token )
prod . post_status ( c , ' success ' , ' legal/cla ' )
prod . post_status ( c , ' success ' , ' ci/runbot ' )
pr . post_comment ( ' hansen close ' , other_token )
pr . post_comment ( ' hansen r+ ' , config [ ' role_reviewer ' ] [ ' token ' ] )
env . run_crons ( )
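# the close request on the source PR should have been rejected (presumably
# only forward ports can be closed through the bot), so it stays open and
# gets staged normally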
assert pr . state == ' open '
with prod :
prod . post_status ( ' staging.a ' , ' success ' , ' legal/cla ' )
prod . post_status ( ' staging.a ' , ' success ' , ' ci/runbot ' )
env . run_crons ( )
pr0_id , pr1_id = env [ ' runbot_merge.pull_requests ' ] . search ( [ ] , order = ' number ' )
assert pr0_id . number == pr . number
pr1 = prod . get_pr ( pr1_id . number )
# `other` can't close fw PR directly, because that requires triage (and even
# write depending on account type) access to the repo, which an external
# contributor probably does not have
with prod , pytest . raises ( Exception ) :
pr1 . close ( other_token )
# the author can close it via the bot
with prod :
pr1 . post_comment ( ' hansen close ' , other_token )
env . run_crons ( )
assert pr1 . state == ' closed '
assert pr1_id . state == ' closed '
def test_skip_ci_all ( env , config , make_repo ) :
prod , _ = make_basic ( env , config , make_repo )
with prod :
prod . make_commits ( ' a ' , Commit ( ' x ' , tree = { ' x ' : ' 0 ' } ) , ref = ' heads/change ' )
pr = prod . make_pr ( target = ' a ' , head = ' change ' )
prod . post_status ( pr . head , ' success ' , ' legal/cla ' )
prod . post_status ( pr . head , ' success ' , ' ci/runbot ' )
[CHG] *: rewrite commands set, rework status management
This commit revisits the commands set in order to make it more
regular, and limit inconsistent command-sets, although it includes
pseudo-command aliases for common tasks now removed from the core set.
Hard Errors
===========
The previous iteration of the commands set would ignore any
non-command term in a command line. This has been changed to hard
error (and ignoring the entire thing) if any command is unknown or
invalid.
This fixes inconsistent / unexpected interpretations where a user
sends a command, then writes a novel on the same line some words of
which happen to *also* be commands, leading to merge states they did
not expect. They should now be told to fuck off.
Priority Restructuring
----------------------
The numerical priority system was pretty messy in that it confused
"staging priority" (in ways which were not entirely straightforward)
with overrides to other concerns.
This has now being split along all the axis, with separate command
subsets for:
- staging prioritisation, now separated between `default`, `priority`,
and `alone`,
- `default` means PRs are picked by an unspecified order when
creating a staging, if nothing better is available
- `priority` means PRs are picked first when staging, however if
`priority` PRs don't fill the staging the rest will be filled with
`default`, this mode did not previously exist
- `alone` means the PRs are picked first, before splits, and only
`alone` PRs can be part of the staging (which usually matches the
modename)
- `skipchecks` overrides both statuses and approval checks, for the
batch, something previously implied in `p=0`, but now
independent. Setting `skipchecks` basically makes the entire batch
`ready`.
For consistency this also sets the reviewer implicitly: since
skipchecks overrides both statuses *and approval*, whoever enables
this mode is essentially the reviewer.
- `cancel` cancels any ongoing staging when the marked PR becomes
ready again, previously this was also implied (in a more restricted
form) by setting `p=0`
FWBot removal
=============
While the "forwardport bot" still exists as an API level (to segregate
access rights between tokens) it has been removed as an interaction
point, as part of the modules merge plan. As a result,
fwbot stops responding
----------------------
Feedback messages are now always sent by the mergebot, the
forward-porting bot should not send any message or notification
anymore.
commands moved to the merge bot
-------------------------------
- `ignore`/`up to` simply changes bot
- `close` as well
- `skipci` is now a choice / flag of an `fw` command, which denotes
the forward-port policy,
- `fw=default` is the old `ci` and resets the policy to default,
that is wait for the PR to be merged to create forward ports, and
for the required statuses on each forward port to be received
before creating the next
- `fw=skipci` is the old `skipci`, it waits for the merge of the
base PR but then creates all the forward ports immediately (unless
it gets a conflict)
- `fw=skipmerge` immediately creates all the forward ports, without
even waiting for the PR to be merged
This is a completely new mode, and may be rather broken as until
now the 'bot has always assumed the source PR had been merged.
approval rework
---------------
Because of the previous section, there is no distinguishing feature
between `mergebot r+` = "merge this PR" and `forwardbot r+` = "merge
this PR and all its parent with different access rights".
As a result, the two have been merged under a single `mergebot r+`
with heuristics attempting to provide the best experience:
- if approving a non-forward port, the behavior does not change
- else, with review rights on the source, all ancestors are approved
- else, as author of the original, approves all ancestors which descend
from a merged PR
- else, approves all ancestors up to and including the oldest ancestor
to which we have review rights
Most notably, the source's author is not delegated on the source or
any of its descendants anymore. This might need to be revisited if it
provides too restrictive.
For the very specialized need of approving a forward-port *and none of
its ancestors*, `review=` can now take a comma (`,`) separated list of
pull request numbers (github numbers, not mergebot ids).
Computed State
==============
The `state` field of pull requests is now computed. Hopefully this
makes the status more consistent and predictable in the long run, and
importantly makes status management more reliable (because reference
datum get updated naturally flowing to the state).
For now however it makes things more complicated as some of the states
have to be separately signaled or updated:
- `closed` and `error` are now separate flags
- `merge_date` is pulled down from forwardport and becomes the
transition signal for ready -> merged
- `reviewed_by` becomes the transition signal for approval (might be a
good idea to rename it...)
- `status` is computed from the head's statuses and overrides, and
*that* becomes the validation state
Ideally, batch-level flags like `skipchecks` should be on, well, the
batch, and `state` should have a dependency on the batch. However
currently the batch is not a durable / permanent member of the system,
so it's a PR-level flag and a messy pile.
One notable change is that *forcing* the state to `ready` now does that
but also sets the reviewer, `skipchecks`, and overrides to ensure the
API-mediated readying does not get rolled back by e.g. the runbot
sending a status.
This is useful for a few types of automated / programmatic PRs
e.g. translation exports, where we set the state programmatically to
limit noise.
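A short sketch of what that implies, assuming the usual `env`/`pr` objects from the surrounding tests; treating `skipchecks` as a PR-level flag is an assumption based on the paragraph above:

pr_id = to_pr(env, pr)
# forcing the state should also set the reviewer and skipchecks so that a
# later status from the runbot does not roll the PR back out of 'ready'
pr_id.state = 'ready'
assert pr_id.reviewed_by
assert pr_id.skipchecks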
recursive dependency hack
-------------------------
Given a sequence of PRs with an override of the source, if one of the
PRs is updated, its descendants should not have the override
anymore. However if the updated PR gets overridden, its descendants
should have *that* override.
This requires some unholy manipulations via an override of `modified`,
as the ORM supports recursive fields but not recursive
dependencies (on a different field).
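A very rough sketch of the idea, not the actual implementation; the model name is the one used throughout these tests, but the `overrides` / `parent_id` propagation below is an assumption:

from odoo import models

class PullRequests(models.Model):
    _inherit = 'runbot_merge.pull_requests'

    def modified(self, fnames, create=False, before=False):
        # assumed shape: when overrides change on a PR, re-signal the field on
        # its direct children so their effective override is recomputed; the
        # recursion happens naturally as each child re-enters this override
        if 'overrides' in fnames:
            children = self.search([('parent_id', 'in', self.ids)])
            if children:
                children.modified(['overrides'])
        return super().modified(fnames, create=create, before=before)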
unconditional followup scheduling
---------------------------------
Previously, scheduling the forward-port followup was contingent on the FW
policy, but it's not actually correct if the new PR is *immediately*
validated (which can happen now that the field is computed, if there
are no required statuses *or* all of the required statuses are
overridden by an ancestor) as nothing will trigger the state change
and thus scheduling of the fp followup.
The followup function checks all the properties of the batch to port,
so this should not result in incorrect ports. It is a bit more
expensive, and will lead to more spam.
Previously this would not happen because on creation of a PR the
validation task (commit -> PR) would still have to execute.
Misc Changes
============
- If a PR is marked as overriding / canceling stagings, it now does
so on retry, not just when initially set.
This was not handled at all previously, so a PR in P0 going into
error due to e.g. a non-deterministic bug would be retried and still
p=0, but a current staging would not get cancelled. Same when a PR
in p=0 goes into error because something failed, then is updated
with a fix.
- Add tracking to a bunch of relevant PR fields.
Post-mortem analysis currently generally requires going through the
text logs to see what happened, which is annoying.
There is a nondeterminism / inconsistency in the tracking which
sometimes leads the admin user to trigger tracking before the bot
does, leading to the staging tracking being attributed to them
during tests, shoved under the carpet by ignoring the user to whom
that tracking is attributed.
When multiple users update tracked fields in the same transaction,
all the changes are attributed to the first one having triggered
tracking (?); I couldn't find why the admin sometimes takes over.
- added and leveraged support for enum-backed selection fields
- moved various fields from forwardport to runbot_merge
- fix a migration which had never worked and which never ran (because
I forgot to bump the version on the module)
- remove some unnecessary intermediate de/serialisation
fixes #673, fixes #309, fixes #792, fixes #846 (probably)
2023-10-31 13:42:07 +07:00
pr . post_comment ( ' hansen fw=skipci ' , config [ ' role_reviewer ' ] [ ' token ' ] )
2020-04-16 17:42:01 +07:00
pr . post_comment ( ' hansen r+ ' , config [ ' role_reviewer ' ] [ ' token ' ] )
env . run_crons ( )
2024-12-02 20:17:28 +07:00
assert to_pr ( env , pr ) . batch_id . fw_policy == ' skipci '
2020-04-16 17:42:01 +07:00
with prod :
prod . post_status ( ' staging.a ' , ' success ' , ' legal/cla ' )
prod . post_status ( ' staging.a ' , ' success ' , ' ci/runbot ' )
env . run_crons ( )
# run cron a few more times for the fps
env . run_crons ( )
env . run_crons ( )
env . run_crons ( )
pr0_id , pr1_id , pr2_id = env [ ' runbot_merge.pull_requests ' ] . search ( [ ] , order = ' number ' )
assert pr1_id . state == ' opened '
assert pr1_id . source_id == pr0_id
assert pr2_id . state == ' opened '
assert pr2_id . source_id == pr0_id
def test_skip_ci_next ( env , config , make_repo ) :
2024-02-26 15:51:22 +07:00
prod , _ = make_basic ( env , config , make_repo )
2020-04-16 17:42:01 +07:00
with prod :
prod . make_commits ( ' a ' , Commit ( ' x ' , tree = { ' x ' : ' 0 ' } ) , ref = ' heads/change ' )
pr = prod . make_pr ( target = ' a ' , head = ' change ' )
prod . post_status ( pr . head , ' success ' , ' legal/cla ' )
prod . post_status ( pr . head , ' success ' , ' ci/runbot ' )
pr . post_comment ( ' hansen r+ ' , config [ ' role_reviewer ' ] [ ' token ' ] )
env . run_crons ( )
with prod :
prod . post_status ( ' staging.a ' , ' success ' , ' legal/cla ' )
prod . post_status ( ' staging.a ' , ' success ' , ' ci/runbot ' )
env . run_crons ( )
pr0_id , pr1_id = env [ ' runbot_merge.pull_requests ' ] . search ( [ ] , order = ' number ' )
with prod :
prod . get_pr ( pr1_id . number ) . post_comment (
2023-10-31 13:42:07 +07:00
' hansen fw=skipci ' ,
config [ ' role_reviewer ' ] [ ' token ' ]
2020-04-16 17:42:01 +07:00
)
2024-05-21 20:44:47 +07:00
assert pr0_id . batch_id . fw_policy == ' skipci '
2020-04-16 17:42:01 +07:00
env . run_crons ( )
_ , _ , pr2_id = env [ ' runbot_merge.pull_requests ' ] . search ( [ ] , order = ' number ' )
assert pr1_id . state == ' opened '
assert pr2_id . state == ' opened '
2021-08-11 16:36:35 +07:00
2022-02-10 19:42:32 +07:00
def test_retarget_after_freeze ( env , config , make_repo , users ) :
""" Turns out it was possible to trip the forwardbot if you ' re a bit of a
dick : the forward port cron was not resilient to forward port failure in
case of filling in new branches ( forward ports existing across a branch
insertion so the fwbot would have to " fill in " for the new branch ) .
But it turns out causing such failure is possible by e . g . retargeting the
latter port . In that case the reinsertion task should just do nothing , and
the retargeted PR should be forward - ported normally once merged .
"""
2024-02-26 15:51:22 +07:00
prod , _ = make_basic ( env , config , make_repo )
project = env [ ' runbot_merge.project ' ] . search ( [ ] )
2022-02-10 19:42:32 +07:00
with prod :
[ c ] = prod . make_commits ( ' b ' , Commit ( ' thing ' , tree = { ' x ' : ' 1 ' } ) , ref = ' heads/mypr ' )
pr = prod . make_pr ( target = ' b ' , head = ' mypr ' )
prod . post_status ( c , ' success ' , ' ci/runbot ' )
prod . post_status ( c , ' success ' , ' legal/cla ' )
pr . post_comment ( ' hansen r+ ' , config [ ' role_reviewer ' ] [ ' token ' ] )
env . run_crons ( )
original_pr_id = to_pr ( env , pr )
assert original_pr_id . state == ' ready '
assert original_pr_id . staging_id
with prod :
prod . post_status ( ' staging.b ' , ' success ' , ' ci/runbot ' )
prod . post_status ( ' staging.b ' , ' success ' , ' legal/cla ' )
env . run_crons ( )
# should have created a pr targeted to C
port_id = env [ ' runbot_merge.pull_requests ' ] . search ( [ ( ' state ' , ' not in ' , ( ' merged ' , ' closed ' ) ) ] )
assert len ( port_id ) == 1
assert port_id . target . name == ' c '
assert port_id . source_id == original_pr_id
assert port_id . parent_id == original_pr_id
2022-12-08 16:42:12 +07:00
branch_c , branch_b , branch_a = branches_before = project . branch_ids
2022-02-10 19:42:32 +07:00
assert [ branch_a . name , branch_b . name , branch_c . name ] == [ ' a ' , ' b ' , ' c ' ]
# create branch so cron runs correctly
with prod : prod . make_ref ( ' heads/bprime ' , prod . get_ref ( ' c ' ) )
project . write ( {
' branch_ids ' : [
2022-12-08 16:42:12 +07:00
( 1 , branch_c . id , { ' sequence ' : 1 } ) ,
2023-06-08 13:19:43 +07:00
( 0 , 0 , { ' name ' : ' bprime ' , ' sequence ' : 2 } ) ,
2022-12-08 16:42:12 +07:00
( 1 , branch_b . id , { ' sequence ' : 3 } ) ,
( 1 , branch_a . id , { ' sequence ' : 4 } ) ,
2022-02-10 19:42:32 +07:00
]
} )
new_branch = project . branch_ids - branches_before
assert new_branch . name == ' bprime '
# should have added a job for the new fp
job = env [ ' forwardport.batches ' ] . search ( [ ] )
assert job
# fuck up yo life: retarget the existing FP PR to the new branch
2023-01-20 21:16:37 +07:00
port_pr = prod . get_pr ( port_id . number )
with prod :
port_pr . base = ' bprime '
assert port_id . target == new_branch
2022-02-10 19:42:32 +07:00
2024-07-30 18:45:51 +07:00
env . run_crons ( None )
2022-02-10 19:42:32 +07:00
assert not job . exists ( ) , " job should have succeeded and apoptosed "
# since the PR was "already forward-ported" to the new branch it should not
# be touched
assert env [ ' runbot_merge.pull_requests ' ] . search ( [ ( ' state ' , ' not in ' , ( ' merged ' , ' closed ' ) ) ] ) == port_id
# merge the retargeted PR
with prod :
prod . post_status ( port_pr . head , ' success ' , ' ci/runbot ' )
prod . post_status ( port_pr . head , ' success ' , ' legal/cla ' )
port_pr . post_comment ( ' hansen r+ ' , config [ ' role_reviewer ' ] [ ' token ' ] )
env . run_crons ( )
with prod :
prod . post_status ( ' staging.bprime ' , ' success ' , ' ci/runbot ' )
prod . post_status ( ' staging.bprime ' , ' success ' , ' legal/cla ' )
env . run_crons ( )
[CHG] *: persistent batches
This probably has latent bugs, and is only the start of the road to v2
(#789): PR batches are now created up-front (alongside the PR), with
PRs attached and detached as needed, hopefully such that things are
not broken (tests pass but...), this required a fair number of
adjustments to code not taking batches into account, or creating
batches on the fly.
`PullRequests.blocked` has also been updated to rely on the batch to
get its batch-mates, such that it can now be a stored field with the
right dependencies.
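For instance, a hedged sketch of the shape `blocked` could take; the `prs` one2many on the batch and the exact dependencies are assumptions, not the actual field definitions:

from odoo import api, fields, models

class PullRequests(models.Model):
    _inherit = 'runbot_merge.pull_requests'

    blocked = fields.Char(compute='_compute_blocked', store=True)

    @api.depends('state', 'batch_id.prs.state')
    def _compute_blocked(self):
        # assumed shape: a ready PR stays blocked as long as one of its
        # batch-mates (reachable through the persistent batch) is not ready
        for pr in self:
            unready = (pr.batch_id.prs - pr).filtered(
                lambda p: p.state not in ('ready', 'merged'))
            if pr.state == 'ready' and unready:
                pr.blocked = "linked PRs are not ready: " + ', '.join(unready.mapped('display_name'))
            else:
                pr.blocked = False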
The next step is to better leverage this change:
- move cross-PR state up to the batch (e.g. skipchecks, priority, ...)
- add fw info to the batch, perform forward-ports batchwise in order
to avoid redundant batch-selection work, and allow altering batches
during fw (e.g. adding or removing PRs)
- use batches to select stagings
- maybe expose staging history of a batch?
2023-12-19 17:10:11 +07:00
# #2 batch 6 (???)
assert port_id . state == ' merged '
2022-02-10 19:42:32 +07:00
new_pr_id = env [ ' runbot_merge.pull_requests ' ] . search ( [ ( ' state ' , ' not in ' , ( ' merged ' , ' closed ' ) ) ] )
assert len ( new_pr_id ) == 1
assert new_pr_id . parent_id == port_id
assert new_pr_id . target == branch_c
2021-08-11 16:36:35 +07:00
def test_approve_draft ( env , config , make_repo , users ) :
2024-02-26 15:51:22 +07:00
prod , _ = make_basic ( env , config , make_repo )
2021-08-11 16:36:35 +07:00
with prod :
prod . make_commits ( ' a ' , Commit ( ' x ' , tree = { ' x ' : ' 0 ' } ) , ref = ' heads/change ' )
pr = prod . make_pr ( target = ' a ' , head = ' change ' , draft = True )
pr . post_comment ( ' hansen r+ ' , config [ ' role_reviewer ' ] [ ' token ' ] )
env . run_crons ( )
pr_id = to_pr ( env , pr )
assert pr_id . state == ' opened '
assert pr . comments == [
( users [ ' reviewer ' ] , ' hansen r+ ' ) ,
seen ( env , pr , users ) ,
2023-10-16 15:46:29 +07:00
( users [ ' user ' ] , f " @ { users [ ' reviewer ' ] } draft PRs can not be approved. " ) ,
2021-08-11 16:36:35 +07:00
]
with prod :
pr . draft = False
assert pr . draft is False
with prod :
pr . post_comment ( ' hansen r+ ' , config [ ' role_reviewer ' ] [ ' token ' ] )
env . run_crons ( )
assert pr_id . state == ' approved '
2022-02-08 16:11:57 +07:00
def test_freeze ( env , config , make_repo , users ) :
""" Freeze:
- should not forward - port the freeze PRs themselves
2024-06-07 20:02:28 +07:00
- unmerged forward ports need to be backfilled
- if the tip of the forward port is approved , the backfilled forward port
should also be
2022-02-08 16:11:57 +07:00
"""
2024-06-07 20:02:28 +07:00
prod , _ = make_basic ( env , config , make_repo , statuses = ' default ' )
2024-02-26 15:51:22 +07:00
project = env [ ' runbot_merge.project ' ] . search ( [ ] )
2024-06-07 20:02:28 +07:00
2022-02-08 16:11:57 +07:00
# branches here are "a" (older), "b", and "c" (master)
with prod :
[ root , _ ] = prod . make_commits (
None ,
Commit ( ' base ' , tree = { ' version ' : ' ' , ' f ' : ' 0 ' } ) ,
Commit ( ' release 1.0 ' , tree = { ' version ' : ' 1.0 ' } ) ,
ref = ' heads/b '
)
prod . make_commits ( root , Commit ( ' other ' , tree = { ' f ' : ' 1 ' } ) , ref = ' heads/c ' )
2024-06-07 20:02:28 +07:00
# region PR which is forward ported but the FPs are not merged (they are approved)
with prod :
prod . make_commits ( " a " , Commit ( " stuff " , tree = { ' x ' : ' 0 ' } ) , ref = " heads/abranch " )
p = prod . make_pr ( target = ' a ' , head = ' abranch ' )
p . post_comment ( " hansen r+ fw=skipci " , config [ ' role_reviewer ' ] [ ' token ' ] )
prod . post_status ( ' abranch ' , ' success ' )
env . run_crons ( )
with prod :
prod . post_status ( ' staging.a ' , ' success ' )
env . run_crons ( )
pr_a_id , pr_b_id , pr_c_id = pr_ids = env [ ' runbot_merge.pull_requests ' ] . search ( [ ] , order = ' number ' )
assert len ( pr_ids ) == 3 , \
" should have created two forward ports, one in b and one in c (/ master) "
# endregion
2022-02-08 16:11:57 +07:00
with prod :
prod . make_commits (
' c ' ,
Commit ( ' Release 1.1 ' , tree = { ' version ' : ' 1.1 ' } ) ,
ref = ' heads/release-1.1 '
)
release = prod . make_pr ( target = ' c ' , head = ' release-1.1 ' )
env . run_crons ( )
2024-06-07 20:02:28 +07:00
# approve pr_c_id but don't actually merge it before freezing
with prod :
prod . post_status ( pr_b_id . head , ' success ' )
prod . post_status ( pr_c_id . head , ' success ' )
prod . get_pr ( pr_c_id . number ) . post_comment ( ' hansen r+ ' , config [ ' role_reviewer ' ] [ ' token ' ] )
# review comment should be handled eagerly
assert pr_b_id . reviewed_by
assert pr_c_id . reviewed_by
2022-02-08 16:11:57 +07:00
w = project . action_prepare_freeze ( )
assert w [ ' res_model ' ] == ' runbot_merge.project.freeze '
w_id = env [ w [ ' res_model ' ] ] . browse ( [ w [ ' res_id ' ] ] )
assert w_id . release_pr_ids . repository_id . name == prod . name
release_id = to_pr ( env , release )
w_id . release_pr_ids . pr_id = release_id . id
assert not w_id . errors
w_id . action_freeze ( )
[FIX] runbot_merge: make freeze wizard labels lookup not shit
I DECLARE BANKRUPTCY!!!
The previous implementation of labels lookup was really not
intuitive (it was just a char field, and matched labels by equality
including the owner tag), and was also full of broken edge
cases (e.g. traceback if a label matched multiple PRs in the same repo
because people reuse branch names).
Tried messing about with contextual `display_name` and `name_search`
on PRs but the client goes wonky in that case, and there is no clean
autocomplete for non-relational fields.
So created a view which reifies labels, and that can be used as the
basis for our search. It doesn't have to be maintained by hand, can be
searched somewhat flexibly, we can add new view fields in the future
if desirable, and it seems to work fine providing a nice
understandable UX, with the reliability of using a normal Odoo model
the normal way.
Also fixed the handling of bump PRs: clearly, clearing the entire
field before trying to update existing records (even with a link_to
in between) is not to the web client's fancy; re-selecting the current
label would just empty the thing entirely.
So use a two-step process slightly closer to the release PRs instead (sketched below):
- first update or delete the existing bump PRs
- then add the new ones
The second part is because bump PRs are somewhat less critical than
release, so it can be a bit more DWIM compared to the more deliberate
process of release PRs where first the list of repositories involved
has to be set up just so, then the PRs can be filled in each of them.
Fixes #697
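A hedged sketch of that two-step shape using Odoo's x2many write commands; the wizard and field names below are placeholders, not the actual ones:

# hypothetical wizard with a bump_pr_ids one2many; all names are placeholders
wizard.write({'bump_pr_ids': [
    (1, existing_line.id, {'pr_id': new_pr.id}),  # first update an existing bump PR
    (2, stale_line.id, 0),                        # ... or delete it outright
]})
wizard.write({'bump_pr_ids': [
    (0, 0, {'repository_id': repo_id, 'pr_id': added_pr.id}),  # then add the new ones
]})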
2023-01-24 17:19:18 +07:00
2024-06-07 20:02:28 +07:00
assert project . branch_ids . mapped ( ' name ' ) == [ ' c ' , ' post-b ' , ' b ' , ' a ' ]
2023-01-24 17:19:18 +07:00
# re-enable forward-port cron after freeze
_ , cron_id = env [ ' ir.model.data ' ] . check_object_reference ( ' forwardport ' , ' port_forward ' , context = { ' active_test ' : False } )
env [ ' ir.cron ' ] . browse ( [ cron_id ] ) . active = True
2024-07-30 18:45:51 +07:00
env . run_crons ( ' forwardport.port_forward ' )
2022-02-08 16:11:57 +07:00
assert release_id . state == ' merged '
assert not env [ ' runbot_merge.pull_requests ' ] . search ( [
2024-06-07 20:02:28 +07:00
( ' source_id ' , ' = ' , release_id . id ) ,
2022-02-08 16:11:57 +07:00
] ) , " the release PRs should not be forward-ported "
2022-10-28 13:03:12 +07:00
2024-06-07 20:02:28 +07:00
assert env [ ' runbot_merge.stagings ' ] . search_count ( [ ] ) == 2 , \
" b and c forward ports should be staged since they were ready before freeze "
# an intermediate PR should have been created
pr_inserted = env [ ' runbot_merge.pull_requests ' ] . search ( [
( ' source_id ' , ' = ' , pr_a_id . id ) ,
( ' target.name ' , ' = ' , ' post-b ' ) ,
] )
assert pr_inserted , " an intermediate PR should have been reinserted in the sequence "
assert pr_c_id . parent_id == pr_inserted
assert pr_inserted . parent_id == pr_b_id
assert pr_inserted . reviewed_by == pr_c_id . reviewed_by , \
" review state should have been copied over from c (master) "
with prod :
prod . post_status ( pr_inserted . head , ' success ' )
prod . post_status ( ' staging.b ' , ' success ' )
prod . post_status ( ' staging.c ' , ' success ' )
env . run_crons ( )
with prod :
prod . post_status ( ' staging.post-b ' , ' success ' )
env . run_crons ( )
assert env [ ' runbot_merge.pull_requests ' ] . search_count ( [ ( ' state ' , ' = ' , ' merged ' ) ] ) \
== len ( [ ' release ' , ' initial ' , ' fw-b ' , ' fw-post-b ' , ' fw-c ' ] )
2024-06-11 20:41:03 +07:00
@pytest.mark.expect_log_errors ( reason = " missing / invalid head causes an error to be logged " )
2022-10-28 13:03:12 +07:00
def test_missing_magic_ref ( env , config , make_repo ) :
""" There are cases where github fails to create / publish or fails to update
the magic refs in refs / pull / * .
In that case , pulling from the regular remote does not bring in the contents
of the PR we ' re trying to forward port, and the forward porting process
fails .
Emulate this behaviour by updating the PR with a commit which lives in the
repo but has no ref .
"""
2024-09-19 17:17:59 +07:00
prod , _ = make_basic ( env , config , make_repo , statuses = ' default ' )
2022-10-28 13:03:12 +07:00
a_head = prod . commit ( ' refs/heads/a ' )
with prod :
[ c ] = prod . make_commits ( a_head . id , Commit ( ' x ' , tree = { ' x ' : ' 0 ' } ) , ref = ' heads/change ' )
pr = prod . make_pr ( target = ' a ' , head = ' change ' )
2024-09-19 17:17:59 +07:00
prod . post_status ( c , ' success ' )
2022-10-28 13:03:12 +07:00
pr . post_comment ( ' hansen r+ ' , config [ ' role_reviewer ' ] [ ' token ' ] )
env . run_crons ( )
# update the PR with a head which does not exist in the repository, so it's
# not found after a fetch, simulating an outdated or missing magic ref
pr_id = to_pr ( env , pr )
assert pr_id . staging_id
2024-09-18 20:19:13 +07:00
with prevent_unstaging ( pr_id . staging_id ) :
pr_id . head = ' 0 ' * 40
2022-10-28 13:03:12 +07:00
with prod :
2024-09-19 17:17:59 +07:00
prod . post_status ( ' staging.a ' , ' success ' )
2022-10-28 13:03:12 +07:00
env . run_crons ( )
assert not pr_id . staging_id
assert pr_id . state == ' merged '
# check that the fw failed
assert not env [ ' runbot_merge.pull_requests ' ] . search ( [ ( ' source_id ' , ' = ' , pr_id . id ) ] ) , \
" forward port should not have been created "
# check that the batch is still here and targeted for the future
req = env [ ' forwardport.batches ' ] . search ( [ ] )
assert len ( req ) == 1
2024-07-30 18:45:51 +07:00
assert req . retry_after > datetime . utcnow ( ) . isoformat ( " " , " seconds " )
2022-10-28 13:03:12 +07:00
# reset retry_after
req . retry_after = ' 1900-01-01 01:01:01 '
# add a real commit
with prod :
[ c2 ] = prod . make_commits ( a_head . id , Commit ( ' y ' , tree = { ' x ' : ' 0 ' } ) )
assert c2 != c
pr_id . head = c2
2024-07-30 18:45:51 +07:00
env . run_crons ( None )
2022-10-28 13:03:12 +07:00
fp_id = env [ ' runbot_merge.pull_requests ' ] . search ( [ ( ' source_id ' , ' = ' , pr_id . id ) ] )
assert fp_id
# the cherrypick process fetches the commits on the PR in order to know
# what they are (rather than e.g. diff the HEAD it branch with the target)
# as a result it doesn't forwardport our fake, we'd have to reset the PR's
# branch for that to happen
2024-02-23 19:46:37 +07:00
def test_disable_branch_with_batches ( env , config , make_repo , users ) :
""" We want to avoid losing pull requests, so when deactivating a branch,
if there are * forward port * batches targeting that branch which have not
been forward ported yet, port them over, as if their source had been merged
after the branch was disabled ( thus skipped over )
"""
2024-02-26 15:51:22 +07:00
repo , fork = make_basic ( env , config , make_repo , statuses = " default " )
proj = env [ ' runbot_merge.project ' ] . search ( [ ] )
2024-02-23 19:46:37 +07:00
branch_b = env [ ' runbot_merge.branch ' ] . search ( [ ( ' name ' , ' = ' , ' b ' ) ] )
assert branch_b
# region repo2 creation & setup
repo2 = make_repo ( ' proj2 ' )
with repo2 :
[ a , b , c ] = repo2 . make_commits (
None ,
Commit ( " a " , tree = { " f " : " a " } ) ,
Commit ( " b " , tree = { " g " : " b " } ) ,
Commit ( " c " , tree = { " h " : " c " } ) ,
)
repo2 . make_ref ( " heads/a " , a )
repo2 . make_ref ( " heads/b " , b )
repo2 . make_ref ( " heads/c " , c )
fork2 = repo2 . fork ( )
repo2_id = env [ ' runbot_merge.repository ' ] . create ( {
" project_id " : proj . id ,
" name " : repo2 . name ,
" required_statuses " : " default " ,
" fp_remote_target " : fork2 . name ,
} )
[ADD] *: per-repository webhook secret
Currently webhook secrets are configured per *project* which is an
issue both because different repositories may have different
administrators and thus creates safety concerns, and because multiple
repositories can feed into different projects (e.g. on mergebot,
odoo-dev/odoo is both an ancillary repository to the main RD project,
and the main repository to the minor / legacy master-wowl
project). This means it can be necessary to have multiple projects
share the same secret as well, which then mandates sharing the secret
across more repositories per (1).
This is a pain in the ass, so just detach secrets from projects and
link them *only* to repositories, it's cleaner and easier to manage
and set up progressively.
This requires a lot of changes to the tests, as they all need to
correctly configure the signaling.
For `runbot_merge` there was *some* setup sharing already via the
module-level `repo` fixtures, those were merged into a conftest-level
fixture which could handle the signaling setup. A few tests which
unnecessarily set up repositories ad-hoc were also moved to the
fixture. But for most of the ad-hoc setup in `runbot_merge`, as well
as `forwardport` where it's all ad-hoc, events sources setup was just
appended as is. This should probably be cleaned up at one point, with
the various requirements collected and organised into a small set of
fixtures doing the job more uniformly.
Fixes #887
2024-06-06 16:07:57 +07:00
env [ ' runbot_merge.events_sources ' ] . create ( { ' repository ' : repo2 . name } )
2024-02-23 19:46:37 +07:00
env [ ' res.partner ' ] . search ( [
( ' github_login ' , ' = ' , config [ ' role_reviewer ' ] [ ' user ' ] )
] ) . write ( {
' review_rights ' : [ ( 0 , 0 , { ' repository_id ' : repo2_id . id , ' review ' : True } ) ]
} )
env [ ' res.partner ' ] . search ( [
( ' github_login ' , ' = ' , config [ ' role_self_reviewer ' ] [ ' user ' ] )
] ) . write ( {
' review_rights ' : [ ( 0 , 0 , { ' repository_id ' : repo2_id . id , ' self_review ' : True } ) ]
} )
# endregion
2024-06-28 17:22:23 +07:00
# region set up forward ported batches
2024-02-23 19:46:37 +07:00
with repo , fork , repo2 , fork2 :
fork . make_commits ( " a " , Commit ( " x " , tree = { " x " : " 1 " } ) , ref = " heads/x " )
pr1_a = repo . make_pr ( title = " X " , target = " a " , head = f " { fork . owner } :x " )
pr1_a . post_comment ( " hansen r+ " , config [ ' role_reviewer ' ] [ ' token ' ] )
repo . post_status ( pr1_a . head , " success " )
fork2 . make_commits ( " a " , Commit ( " x " , tree = { " x " : " 1 " } ) , ref = " heads/x " )
pr2_a = repo2 . make_pr ( title = " X " , target = " a " , head = f " { fork2 . owner } :x " )
pr2_a . post_comment ( " hansen r+ " , config [ ' role_reviewer ' ] [ ' token ' ] )
repo2 . post_status ( pr2_a . head , " success " )
2024-06-28 17:22:23 +07:00
fork . make_commits ( " a " , Commit ( " y " , tree = { " y " : " 1 " } ) , ref = " heads/y " )
pr3_a = repo . make_pr ( title = " Y " , target = " a " , head = f " { fork . owner } :y " )
pr3_a . post_comment ( " hansen r+ " , config [ ' role_reviewer ' ] [ ' token ' ] )
repo . post_status ( pr3_a . head , ' success ' )
2024-02-23 19:46:37 +07:00
# limit pr2's forward-porting to branch b so it is not ported any further
pr2_a_id = to_pr ( env , pr2_a )
pr2_a_id . limit_id = branch_b . id
env . run_crons ( )
assert pr2_a_id . limit_id == branch_b
# endregion
with repo , repo2 :
repo . post_status ( ' staging.a ' , ' success ' )
repo2 . post_status ( ' staging.a ' , ' success ' )
env . run_crons ( )
PullRequests = env [ ' runbot_merge.pull_requests ' ]
pr1_b_id = PullRequests . search ( [ ( ' parent_id ' , ' = ' , to_pr ( env , pr1_a ) . id ) ] )
pr2_b_id = PullRequests . search ( [ ( ' parent_id ' , ' = ' , pr2_a_id . id ) ] )
2024-06-28 17:22:23 +07:00
pr3_b_id = PullRequests . search ( [ ( ' parent_id ' , ' = ' , to_pr ( env , pr3_a ) . id ) ] )
2024-02-23 19:46:37 +07:00
assert pr1_b_id . parent_id
assert pr1_b_id . state == ' opened '
assert pr2_b_id . parent_id
assert pr2_b_id . state == ' opened '
2024-06-28 17:22:23 +07:00
assert pr3_b_id . parent_id
assert pr3_b_id . state == ' opened '
# detach pr3's forward port in b from its parent
pr3_b_id . write ( { ' parent_id ' : False , ' detach_reason ' : ' because ' } )
2024-02-23 19:46:37 +07:00
b_id = proj . branch_ids . filtered ( lambda b : b . name == ' b ' )
proj . write ( {
' branch_ids ' : [ ( 1 , b_id . id , { ' active ' : False } ) ]
} )
env . run_crons ( )
assert not b_id . active
2024-06-28 17:22:23 +07:00
# pr1_a, pr1_b, pr1_c, pr2_a, pr2_b, pr3_a, pr3_b, pr3_c
assert PullRequests . search_count ( [ ] ) == 8 , " should have ported pr1 and pr3 but not pr2 "
assert PullRequests . search_count ( [ ( ' parent_id ' , ' = ' , pr1_b_id . id ) ] )
assert PullRequests . search_count ( [ ( ' parent_id ' , ' = ' , pr3_b_id . id ) ] )
2024-02-23 19:46:37 +07:00
assert repo . get_pr ( pr1_b_id . number ) . comments == [
seen ( env , repo . get_pr ( pr1_b_id . number ) , users ) ,
( users [ ' user ' ] , " This PR targets b and is part of the forward-port chain. Further PRs will be created up to c. \n \n More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port \n " ) ,
( users [ ' user ' ] , " @ {user} @ {reviewer} the target branch ' b ' has been disabled, you may want to close this PR. \n \n As this was not its limit, it will automatically be forward ported to the next active branch. " . format_map ( users ) ) ,
]
assert repo2 . get_pr ( pr2_b_id . number ) . comments == [
seen ( env , repo2 . get_pr ( pr2_b_id . number ) , users ) ,
( users [ ' user ' ] , """ \
@ { user } @ { reviewer } this PR targets b and is the last of the forward - port chain .
To merge the full chain , use
> @hansen r +
More info at https : / / github . com / odoo / odoo / wiki / Mergebot #forward-port
""" .format_map(users)),
( users [ ' user ' ] , " @ {user} @ {reviewer} the target branch ' b ' has been disabled, you may want to close this PR. " . format_map ( users ) ) ,
]
def test_disable_multitudes ( env , config , make_repo , users , setreviewers ) :
""" Ensure that deactivation ports can jump over other deactivated branches.
"""
# region setup
repo = make_repo ( " bob " )
project = env [ ' runbot_merge.project ' ] . create ( {
" name " : " bob " ,
" github_token " : config [ ' github ' ] [ ' token ' ] ,
" github_prefix " : " hansen " ,
2024-11-29 15:04:06 +07:00
' github_name ' : config [ ' github ' ] [ ' name ' ] ,
' github_email ' : " foo@example.org " ,
2024-02-23 19:46:37 +07:00
" fp_github_token " : config [ ' github ' ] [ ' token ' ] ,
" fp_github_name " : " herbert " ,
" branch_ids " : [
( 0 , 0 , { ' name ' : ' a ' , ' sequence ' : 90 } ) ,
( 0 , 0 , { ' name ' : ' b ' , ' sequence ' : 80 } ) ,
( 0 , 0 , { ' name ' : ' c ' , ' sequence ' : 70 } ) ,
( 0 , 0 , { ' name ' : ' d ' , ' sequence ' : 60 } ) ,
] ,
" repo_ids " : [ ( 0 , 0 , {
' name ' : repo . name ,
' required_statuses ' : ' default ' ,
' fp_remote_target ' : repo . name ,
} ) ] ,
} )
setreviewers ( project . repo_ids )
2024-06-06 16:07:57 +07:00
env [ ' runbot_merge.events_sources ' ] . create ( { ' repository ' : repo . name } )
2024-02-23 19:46:37 +07:00
with repo :
[ a , b , c , d ] = repo . make_commits (
None ,
Commit ( " a " , tree = { " branch " : " a " } ) ,
Commit ( " b " , tree = { " branch " : " b " } ) ,
Commit ( " c " , tree = { " branch " : " c " } ) ,
Commit ( " d " , tree = { " branch " : " d " } ) ,
)
repo . make_ref ( " heads/a " , a )
repo . make_ref ( " heads/b " , b )
repo . make_ref ( " heads/c " , c )
repo . make_ref ( " heads/d " , d )
# endregion
with repo :
[ a ] = repo . make_commits ( " a " , Commit ( " X " , tree = { " x " : " 1 " } ) , ref = " heads/x " )
pra = repo . make_pr ( target = " a " , head = " x " )
pra . post_comment ( " hansen r+ " , config [ ' role_reviewer ' ] [ ' token ' ] )
repo . post_status ( a , " success " )
env . run_crons ( )
with repo :
repo . post_status ( ' staging.a ' , ' success ' )
env . run_crons ( )
pra_id = to_pr ( env , pra )
assert pra_id . state == ' merged '
prb_id = env [ ' runbot_merge.pull_requests ' ] . search ( [ ( ' target.name ' , ' = ' , ' b ' ) ] )
assert prb_id . parent_id == pra_id
project . write ( {
' branch_ids ' : [
( 1 , b . id , { ' active ' : False } )
for b in env [ ' runbot_merge.branch ' ] . search ( [ ( ' name ' , ' in ' , [ ' b ' , ' c ' ] ) ] )
]
} )
env . run_crons ( )
# should not have ported prb to the disabled branch c
assert not env [ ' runbot_merge.pull_requests ' ] . search ( [ ( ' target.name ' , ' = ' , ' c ' ) ] )
# should have ported prb to the active branch d
prd_id = env [ ' runbot_merge.pull_requests ' ] . search ( [ ( ' target.name ' , ' = ' , ' d ' ) ] )
assert prd_id
assert prd_id . parent_id == prb_id
prb = repo . get_pr ( prb_id . number )
assert prb . comments == [
seen ( env , prb , users ) ,
( users [ ' user ' ] , ' This PR targets b and is part of the forward-port chain. Further PRs will be created up to d. \n \n More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port \n ' ) ,
( users [ ' user ' ] , """ \
@ { user } @ { reviewer } the target branch ' b ' has been disabled , you may want to close this PR .
As this was not its limit , it will automatically be forward ported to the next active branch . \
""" .format_map(users)),
]
prd = repo . get_pr ( prd_id . number )
assert prd . comments == [
seen ( env , prd , users ) ,
( users [ ' user ' ] , """ \
@ { user } @ { reviewer } this PR targets d and is the last of the forward - port chain .
To merge the full chain , use
> @hansen r +
More info at https : / / github . com / odoo / odoo / wiki / Mergebot #forward-port
""" .format_map(users))
]
2024-06-10 23:47:49 +07:00
FMT = ' % Y- % m- %d % H: % M: % S '
FAKE_PREV_WEEK = ( datetime . now ( ) + timedelta ( days = 1 ) ) . strftime ( FMT )
def test_reminder_detached ( env , config , make_repo , users ) :
""" On detached forward ports, both sides of the detachment should be notified.
"""
# region setup
prod , _ = make_basic ( env , config , make_repo , statuses = ' default ' )
with prod :
prod . make_commits ( ' a ' , Commit ( ' c ' , tree = { ' x ' : ' 0 ' } ) , ref = " heads/abranch " )
pr_a = prod . make_pr ( target = ' a ' , head = ' abranch ' )
prod . post_status ( ' abranch ' , ' success ' )
pr_a . post_comment ( ' hansen r+ fw=skipci ' , config [ ' role_reviewer ' ] [ ' token ' ] )
env . run_crons ( )
with prod :
prod . post_status ( ' staging.a ' , ' success ' )
env . run_crons ( )
pr_a_id = to_pr ( env , pr_a )
pr_b_id = env [ ' runbot_merge.pull_requests ' ] . search ( [
( ' target.name ' , ' = ' , ' b ' ) ,
( ' parent_id ' , ' = ' , pr_a_id . id ) ,
] )
assert pr_b_id
with prod :
prod . post_status ( pr_b_id . head , ' success ' )
env . run_crons ( )
pr_c_id = env [ ' runbot_merge.pull_requests ' ] . search ( [
( ' target.name ' , ' = ' , ' c ' ) ,
( ' parent_id ' , ' = ' , pr_b_id . id ) ,
] )
assert pr_c_id
# endregion
pr_b = prod . get_pr ( pr_b_id . number )
pr_c = prod . get_pr ( pr_c_id . number )
# region sanity check
2024-07-30 18:45:51 +07:00
env . run_crons ( ' forwardport.reminder ' , context = { ' forwardport_updated_before ' : FAKE_PREV_WEEK } )
2024-06-10 23:47:49 +07:00
assert pr_b . comments == [
seen ( env , pr_b , users ) ,
( users [ ' user ' ] , """ \
This PR targets b and is part of the forward - port chain . Further PRs will be created up to c .
More info at https : / / github . com / odoo / odoo / wiki / Mergebot #forward-port
""" )], " the intermediate PR should not be reminded "
assert pr_c . comments == [
seen ( env , pr_c , users ) ,
( users [ ' user ' ] , """ \
@ % s @ % s this PR targets c and is the last of the forward - port chain containing :
* % s
To merge the full chain , use
> @hansen r +
More info at https : / / github . com / odoo / odoo / wiki / Mergebot #forward-port
""" % (
users [ ' user ' ] , users [ ' reviewer ' ] ,
pr_b_id . display_name ,
) ) ,
( users [ ' user ' ] , " @ %s @ %s this forward port of %s is awaiting action (not merged or closed). " % (
users [ ' user ' ] ,
users [ ' reviewer ' ] ,
pr_a_id . display_name ,
) )
] , " the final PR should be reminded "
# endregion
# region check detached
pr_c_id . write ( { ' parent_id ' : False , ' detach_reason ' : ' because ' } )
2024-07-30 18:45:51 +07:00
env . run_crons ( ' forwardport.reminder ' , context = { ' forwardport_updated_before ' : FAKE_PREV_WEEK } )
2024-06-10 23:47:49 +07:00
assert pr_b . comments [ 2 : ] == [
( users [ ' user ' ] , " @ %s @ %s child PR %s was modified / updated and has become a normal PR. This PR (and any of its parents) will need to be merged independently as approvals won ' t cross. " % (
users [ ' user ' ] ,
users [ ' reviewer ' ] ,
pr_c_id . display_name ,
) ) ,
( users [ ' user ' ] , " @ %s @ %s this forward port of %s is awaiting action (not merged or closed). " % (
users [ ' user ' ] ,
users [ ' reviewer ' ] ,
pr_a_id . display_name ,
) )
] , " the detached-from intermediate PR should now be reminded "
assert pr_c . comments [ 3 : ] == [
( users [ ' user ' ] , " @ %(user)s @ %(reviewer)s this PR was modified / updated and has become a normal PR. It must be merged directly. " % users ) ,
( users [ ' user ' ] , " @ %s @ %s this forward port of %s is awaiting action (not merged or closed). " % (
users [ ' user ' ] ,
users [ ' reviewer ' ] ,
pr_a_id . display_name ,
) )
] , " the final forward port should be reminded as before "
# endregion