# [IMP] *: crons tests running for better triggered compatibility
#
# Mergebot / forwardport crons need to run in a specific ordering in
# order to flow into one another correctly. The default ordering being
# unspecified, it was not possible to use the normal cron
# runner (instead of the external driver running crons in sequence one
# at a time). This can be fixed by setting *sequences* on crons, as the
# cron runner (`_process_jobs`) will use that order to acquire and run
# crons.
#
# Also override `_process_jobs` however: the built-in cron runner
# fetches a static list of ready crons, then runs that.
#
# This is fine for the normal situation where the cron runner runs in a
# loop anyway, but it's an issue for the tests, as we expect that cron A
# can trigger cron B, and we want cron B to run *right now* even if it
# hadn't been triggered before cron A ran.
#
# We can replace `_process_job` with a cut down version which does
# that (cut down because we don't need most of the error handling /
# resilience, there's no concurrent workers, there's no module being
# installed, versions must match, ...). This allows e.g. the cron
# propagating commit statuses to trigger the staging cron, and both will
# run within the same `run_crons` session.
#
# Something I didn't touch is that `_process_jobs` internally creates
# completely new environments, so there is no way to pass context into
# the cron jobs anymore (whereas it works for `method_direct_trigger`);
# this means the context values have to be shunted elsewhere for that
# purpose, which is gross. But even though I'm replacing `_process_jobs`,
# this seems a bit too much of a change in cron execution semantics. So
# left it out.
#
# While at it, silence the spammy `py.warnings` stuff I can't do
# much about.
import datetime
import functools
from itertools import repeat

import pytest

from utils import Commit, to_pr, ensure_one

def test_disable_staging(env, project, repo, config):
|
|
|
|
"""In order to avoid issues of cron locking, as well as not disable staging
|
|
|
|
for every project when trying to freeze just one of them (cough cough), a
|
|
|
|
toggle is available on the project to skip staging for it.
|
|
|
|
"""
|
|
|
|
with repo:
|
|
|
|
[m] = repo.make_commits(None, Commit("m", tree={"a": "1"}), ref="heads/master")
|
|
|
|
|
|
|
|
[c] = repo.make_commits(m, Commit("c", tree={"a": "2"}), ref="heads/other")
|
|
|
|
pr = repo.make_pr(title="whatever", target="master", head="other")
|
|
|
|
pr.post_comment("hansen r+", config["role_reviewer"]['token'])
|
|
|
|
repo.post_status(c, "success")
|
|
|
|
env.run_crons()
|
|
|
|
|
|
|
|
pr_id = to_pr(env, pr)
|
|
|
|
staging_1 = pr_id.staging_id
|
|
|
|
assert staging_1.active
|
|
|
|
|
|
|
|
project.staging_enabled = False
|
|
|
|
staging_1.cancel("because")
|
|
|
|
|
|
|
|
env.run_crons()
|
|
|
|
|
|
|
|
assert staging_1.active is False
|
|
|
|
assert staging_1.state == "cancelled"
|
|
|
|
assert not pr_id.staging_id.active,\
|
|
|
|
"should not be re-staged, because staging has been disabled"
|
2023-11-30 18:27:09 +07:00
|
|
|
|
|
|
|
@pytest.mark.parametrize('mode,cutoff,second', [
|
|
|
|
# default mode, the second staging is the first half of the first staging
|
|
|
|
('default', 2, [0]),
|
|
|
|
# splits are right-biased (the midpoint is rounded down), so for odd
|
|
|
|
# staging sizes the first split is the smaller one
|
|
|
|
('default', 3, [0]),
|
|
|
|
# if the split results in ((1, 2), 1), largest stages the second
|
|
|
|
('largest', 3, [1, 2]),
|
|
|
|
# if the split results in ((1, 1), 2), largest stages the ready PRs
|
|
|
|
('largest', 2, [2, 3]),
|
|
|
|
# even if it's a small minority, ready selects the ready PR(s)
|
|
|
|
('ready', 3, [3]),
|
|
|
|
('ready', 2, [2, 3]),
|
|
|
|
])
|
|
|
|
def test_staging_priority(env, project, repo, config, mode, cutoff, second):
|
|
|
|
"""By default, unless a PR is prioritised as "alone" splits take priority
|
|
|
|
over new stagings.
|
|
|
|
|
|
|
|
*However* to try and maximise throughput in trying times, it's possible to
|
|
|
|
configure the project to prioritise either the largest staging (between spit
|
|
|
|
and ready batches), or to just prioritise new stagings.
|
|
|
|
"""
|
|
|
|
def select(prs, indices):
|
|
|
|
zero = env['runbot_merge.pull_requests']
|
|
|
|
filtered = (p for i, p in enumerate(prs) if i in indices)
|
|
|
|
return functools.reduce(lambda a, b: a | b, filtered, zero)
|
|
|
|
|
|
|
|
project.staging_priority = mode
|
|
|
|
# we need at least 3 PRs, two that we can split out, and one leftover
|
|
|
|
with repo:
|
|
|
|
[m] = repo.make_commits(None, Commit("m", tree={"ble": "1"}), ref="heads/master")
|
|
|
|
|
[IMP] *: crons tests running for better triggered compatibility
Mergebot / forwardport crons need to run in a specific ordering in
order to flow into one another correctly. The default ordering being
unspecified, it was not possible to use the normal cron
runner (instead of the external driver running crons in sequence one
at a time). This can be fixed by setting *sequences* on crons, as the
cron runner (`_process_jobs`) will use that order to acquire and run
crons.
Also override `_process_jobs` however: the built-in cron runner
fetches a static list of ready crons, then runs that.
This is fine for normal situation where the cron runner runs in a loop
anyway but it's any issue for the tests, as we expect that cron A can
trigger cron B, and we want cron B to run *right now* even if it
hadn't been triggered before cron A ran.
We can replace `_process_job` with a cut down version which does
that (cut down because we don't need most of the error handling /
resilience, there's no concurrent workers, there's no module being
installed, versions must match, ...). This allows e.g. the cron
propagating commit statuses to trigger the staging cron, and both will
run within the same `run_crons` session.
Something I didn't touch is that `_process_jobs` internally creates
completely new environments so there is no way to pass context into
the cron jobs anymore (whereas it works for `method_direct_trigger`),
this means the context values have to be shunted elsewhere for that
purpose which is gross. But even though I'm replacing `_process_jobs`,
this seems a bit too much of a change in cron execution semantics. So
left it out.
While at it tho, silence the spammy `py.warnings` stuff I can't do
much about.
2024-07-30 14:21:05 +07:00
|
|
|
repo.make_commits(m, Commit("c", tree={"1": "1"}), ref="heads/pr1")
|
2023-11-30 18:27:09 +07:00
|
|
|
pr1 = repo.make_pr(title="whatever", target="master", head="pr1")
|
|
|
|
|
[IMP] *: crons tests running for better triggered compatibility
Mergebot / forwardport crons need to run in a specific ordering in
order to flow into one another correctly. The default ordering being
unspecified, it was not possible to use the normal cron
runner (instead of the external driver running crons in sequence one
at a time). This can be fixed by setting *sequences* on crons, as the
cron runner (`_process_jobs`) will use that order to acquire and run
crons.
Also override `_process_jobs` however: the built-in cron runner
fetches a static list of ready crons, then runs that.
This is fine for normal situation where the cron runner runs in a loop
anyway but it's any issue for the tests, as we expect that cron A can
trigger cron B, and we want cron B to run *right now* even if it
hadn't been triggered before cron A ran.
We can replace `_process_job` with a cut down version which does
that (cut down because we don't need most of the error handling /
resilience, there's no concurrent workers, there's no module being
installed, versions must match, ...). This allows e.g. the cron
propagating commit statuses to trigger the staging cron, and both will
run within the same `run_crons` session.
Something I didn't touch is that `_process_jobs` internally creates
completely new environments so there is no way to pass context into
the cron jobs anymore (whereas it works for `method_direct_trigger`),
this means the context values have to be shunted elsewhere for that
purpose which is gross. But even though I'm replacing `_process_jobs`,
this seems a bit too much of a change in cron execution semantics. So
left it out.
While at it tho, silence the spammy `py.warnings` stuff I can't do
much about.
2024-07-30 14:21:05 +07:00
|
|
|
repo.make_commits(m, Commit("c", tree={"2": "2"}), ref="heads/pr2")
|
2023-11-30 18:27:09 +07:00
|
|
|
pr2 = repo.make_pr(title="whatever", target="master", head="pr2")
|
|
|
|
|
[IMP] *: crons tests running for better triggered compatibility
Mergebot / forwardport crons need to run in a specific ordering in
order to flow into one another correctly. The default ordering being
unspecified, it was not possible to use the normal cron
runner (instead of the external driver running crons in sequence one
at a time). This can be fixed by setting *sequences* on crons, as the
cron runner (`_process_jobs`) will use that order to acquire and run
crons.
Also override `_process_jobs` however: the built-in cron runner
fetches a static list of ready crons, then runs that.
This is fine for normal situation where the cron runner runs in a loop
anyway but it's any issue for the tests, as we expect that cron A can
trigger cron B, and we want cron B to run *right now* even if it
hadn't been triggered before cron A ran.
We can replace `_process_job` with a cut down version which does
that (cut down because we don't need most of the error handling /
resilience, there's no concurrent workers, there's no module being
installed, versions must match, ...). This allows e.g. the cron
propagating commit statuses to trigger the staging cron, and both will
run within the same `run_crons` session.
Something I didn't touch is that `_process_jobs` internally creates
completely new environments so there is no way to pass context into
the cron jobs anymore (whereas it works for `method_direct_trigger`),
this means the context values have to be shunted elsewhere for that
purpose which is gross. But even though I'm replacing `_process_jobs`,
this seems a bit too much of a change in cron execution semantics. So
left it out.
While at it tho, silence the spammy `py.warnings` stuff I can't do
much about.
2024-07-30 14:21:05 +07:00
|
|
|
repo.make_commits(m, Commit("c", tree={"3": "3"}), ref="heads/pr3")
|
2023-11-30 18:27:09 +07:00
|
|
|
pr3 = repo.make_pr(title="whatever", target="master", head="pr3")
|
|
|
|
|
[IMP] *: crons tests running for better triggered compatibility
Mergebot / forwardport crons need to run in a specific ordering in
order to flow into one another correctly. The default ordering being
unspecified, it was not possible to use the normal cron
runner (instead of the external driver running crons in sequence one
at a time). This can be fixed by setting *sequences* on crons, as the
cron runner (`_process_jobs`) will use that order to acquire and run
crons.
Also override `_process_jobs` however: the built-in cron runner
fetches a static list of ready crons, then runs that.
This is fine for normal situation where the cron runner runs in a loop
anyway but it's any issue for the tests, as we expect that cron A can
trigger cron B, and we want cron B to run *right now* even if it
hadn't been triggered before cron A ran.
We can replace `_process_job` with a cut down version which does
that (cut down because we don't need most of the error handling /
resilience, there's no concurrent workers, there's no module being
installed, versions must match, ...). This allows e.g. the cron
propagating commit statuses to trigger the staging cron, and both will
run within the same `run_crons` session.
Something I didn't touch is that `_process_jobs` internally creates
completely new environments so there is no way to pass context into
the cron jobs anymore (whereas it works for `method_direct_trigger`),
this means the context values have to be shunted elsewhere for that
purpose which is gross. But even though I'm replacing `_process_jobs`,
this seems a bit too much of a change in cron execution semantics. So
left it out.
While at it tho, silence the spammy `py.warnings` stuff I can't do
much about.
2024-07-30 14:21:05 +07:00
|
|
|
repo.make_commits(m, Commit("c", tree={"4": "4"}), ref="heads/pr4")
|
2023-11-30 18:27:09 +07:00
|
|
|
pr4 = repo.make_pr(title="whatever", target="master", head="pr4")
|
|
|
|
|
|
|
|
prs = [pr1, pr2, pr3, pr4]
|
|
|
|
pr_ids = functools.reduce(
|
|
|
|
lambda a, b: a | b,
|
|
|
|
map(to_pr, repeat(env), prs)
|
|
|
|
)
|
|
|
|
# ready the PRs for the initial staging (to split)
|
|
|
|
pre_cutoff = pr_ids[:cutoff]
|
|
|
|
with repo:
|
|
|
|
for pr, pr_id in zip(prs[:cutoff], pre_cutoff):
|
|
|
|
pr.post_comment('hansen r+', config['role_reviewer']['token'])
|
|
|
|
repo.post_status(pr_id.head, 'success')
|
|
|
|
env.run_crons()
|
|
|
|
# check they staged as expected
|
|
|
|
assert all(p.staging_id for p in pre_cutoff)
|
|
|
|
staging = ensure_one(env['runbot_merge.stagings'].search([]))
|
|
|
|
ensure_one(pre_cutoff.staging_id)
|
|
|
|
|
|
|
|
# ready the rest
|
|
|
|
with repo:
|
|
|
|
for pr, pr_id in zip(prs[cutoff:], pr_ids[cutoff:]):
|
|
|
|
pr.post_comment('hansen r+', config['role_reviewer']['token'])
|
|
|
|
repo.post_status(pr_id.head, 'success')
|
2024-07-30 18:42:00 +07:00
|
|
|
env.run_crons(None)
|
2023-11-30 18:27:09 +07:00
|
|
|
assert not pr_ids.filtered(lambda p: p.blocked)
|
|
|
|
|
|
|
|
# trigger a split
|
|
|
|
with repo:
|
|
|
|
repo.post_status('staging.master', 'failure')
|
[IMP] *: crons tests running for better triggered compatibility
Mergebot / forwardport crons need to run in a specific ordering in
order to flow into one another correctly. The default ordering being
unspecified, it was not possible to use the normal cron
runner (instead of the external driver running crons in sequence one
at a time). This can be fixed by setting *sequences* on crons, as the
cron runner (`_process_jobs`) will use that order to acquire and run
crons.
Also override `_process_jobs` however: the built-in cron runner
fetches a static list of ready crons, then runs that.
This is fine for normal situation where the cron runner runs in a loop
anyway but it's any issue for the tests, as we expect that cron A can
trigger cron B, and we want cron B to run *right now* even if it
hadn't been triggered before cron A ran.
We can replace `_process_job` with a cut down version which does
that (cut down because we don't need most of the error handling /
resilience, there's no concurrent workers, there's no module being
installed, versions must match, ...). This allows e.g. the cron
propagating commit statuses to trigger the staging cron, and both will
run within the same `run_crons` session.
Something I didn't touch is that `_process_jobs` internally creates
completely new environments so there is no way to pass context into
the cron jobs anymore (whereas it works for `method_direct_trigger`),
this means the context values have to be shunted elsewhere for that
purpose which is gross. But even though I'm replacing `_process_jobs`,
this seems a bit too much of a change in cron execution semantics. So
left it out.
While at it tho, silence the spammy `py.warnings` stuff I can't do
much about.
2024-07-30 14:21:05 +07:00
|
|
|
|
|
|
|
# specifically delay creation of new staging to observe the failed
|
|
|
|
# staging's state and the splits
|
|
|
|
model, cron_id = env['ir.model.data'].check_object_reference('runbot_merge', 'staging_cron')
|
2024-08-01 15:15:32 +07:00
|
|
|
staging_cron = env[model].browse([cron_id])
|
|
|
|
staging_cron.active = False
|
|
|
|
|
2024-07-31 14:40:53 +07:00
|
|
|
env.run_crons(None)
|
2023-11-30 18:27:09 +07:00
|
|
|
assert not staging.active
|
|
|
|
assert not env['runbot_merge.stagings'].search([]).active
|
|
|
|
assert env['runbot_merge.split'].search_count([]) == 2
|
|
|
|
|
2024-08-01 15:15:32 +07:00
|
|
|
staging_cron.active = True
|
|
|
|
# manually trigger that cron, as having the cron disabled prevented the creation of the triggers entirely
|
|
|
|
env.run_crons('runbot_merge.staging_cron')
|
2023-11-30 18:27:09 +07:00
|
|
|
|
|
|
|
# check that st.pr_ids are the PRs we expect
|
|
|
|
st = env['runbot_merge.stagings'].search([])
|
|
|
|
assert st.pr_ids == select(pr_ids, second)
|