import pytest
from utils import seen, Commit, pr_page


def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, config, users, page):
    """ PRs to disabled branches are ignored, but what if the PR exists
    *before* the branch is disabled?
    """
    # run crons from template to clean up the queue before possibly creating
    # new work
    assert env['base'].run_crons()
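
    # NOTE: the `(0, 0, {vals})` tuples below are Odoo's one2many "create"
    # commands: each adds a new branch record to the project, sequenced after
    # the pre-existing branch (whose sequence is forced to 0)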

    repo = make_repo('repo')
    project.branch_ids.sequence = 0
    project.write({'branch_ids': [
        (0, 0, {'name': 'other', 'sequence': 1}),
        (0, 0, {'name': 'other2', 'sequence': 2}),
    ]})
    repo_id = env['runbot_merge.repository'].create({
        'project_id': project.id,
        'name': repo.name,
        'status_ids': [(0, 0, {'context': 'status'})],
        'group_id': False,
    })
    setreviewers(*project.repo_ids)
    env['runbot_merge.events_sources'].create({'repository': repo.name})
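
    # the `repo` context manager batches the GitHub-side operations; in this
    # test harness the resulting webhooks are delivered to the mergebot when
    # the block exits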

    with repo:
        [m] = repo.make_commits(None, Commit('root', tree={'a': '1'}), ref='heads/master')
        [ot] = repo.make_commits(m, Commit('other', tree={'b': '1'}), ref='heads/other')
        repo.make_commits(m, Commit('other2', tree={'c': '1'}), ref='heads/other2')

        [c] = repo.make_commits(ot, Commit('wheee', tree={'b': '2'}))
        pr = repo.make_pr(title="title", body='body', target='other', head=c)
        repo.post_status(c, 'success', 'status')
        pr.post_comment('hansen r+', config['role_reviewer']['token'])
    env.run_crons()

    pr_id = env['runbot_merge.pull_requests'].search([
        ('repository', '=', repo_id.id),
        ('number', '=', pr.number),
    ])
    branch_id = pr_id.target
    assert pr_id.staging_id
    staging_id = branch_id.active_staging_id
    assert staging_id == pr_id.staging_id
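
    # `get_ref` queries the live GitHub repository; in this harness a missing
    # ref surfaces as AssertionError("Not Found"), which the checks below use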

    # staging of `pr` should have generated a staging branch
    _ = repo.get_ref('heads/staging.other')
    # stagings should not need a tmp branch anymore, so this should not exist
    with pytest.raises(AssertionError, match=r'Not Found'):
        repo.get_ref('heads/tmp.other')

    # disable branch "other"
    branch_id.active = False
    env.run_crons()

    # triggered cleanup should have deleted the staging for the disabled
    # `other` target branch
    with pytest.raises(AssertionError, match=r'Not Found'):
        repo.get_ref('heads/staging.other')

    # the PR should not have been closed implicitly
    assert pr_id.state == 'ready'
    # but it should be unstaged
    assert not pr_id.staging_id

    assert not branch_id.active_staging_id
    assert staging_id.state == 'cancelled', \
        "deactivating the target branch should have cancelled the staging"
    assert staging_id.reason == "Target branch deactivated by 'admin'."
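
    # the PR's dashboard page should still render, flagging the now-inactive
    # target branch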

    p = pr_page(page, pr)
    [target] = p.cssselect('table tr.bg-info')
    assert 'inactive' in target.classes
    assert target[0].text_content() == "other"

    assert pr.comments == [
        (users['reviewer'], "hansen r+"),
        seen(env, pr, users),
        (users['user'], "@%(user)s @%(reviewer)s the target branch 'other' has been disabled, you may want to close this PR." % users),
    ]
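
    # updates to the PR should still be tracked even though its target branch
    # is disabled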

    with repo:
        [c2] = repo.make_commits(ot, Commit('wheee', tree={'b': '3'}))
        repo.update_ref(pr.ref, c2, force=True)
    assert pr_id.head == c2, "pr should be aware of its update"
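
    # retargeting the PR to the still-active `other2` should make it
    # stageable again; `rebase-ff` (presumably rebase and fast-forward)
    # re-approves with an explicit merge method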

    with repo:
        pr.base = 'other2'
        repo.post_status(c2, 'success', 'status')
        pr.post_comment('hansen rebase-ff r+', config['role_reviewer']['token'])
    env.run_crons()

    assert pr_id.state == 'ready'
    assert pr_id.target == env['runbot_merge.branch'].search([('name', '=', 'other2')])
    assert pr_id.staging_id

    # staging of `pr` should have generated a staging branch
    _ = repo.get_ref('heads/staging.other2')
    # stagings should not need a tmp branch anymore, so this should not exist
    with pytest.raises(AssertionError, match=r'Not Found'):
        repo.get_ref('heads/tmp.other2')


def test_new_pr_no_branch(env, project, make_repo, setreviewers, users):
    """ A new PR to an *unknown* branch should be ignored, with a warning
    """
    repo = make_repo('repo')
    repo_id = env['runbot_merge.repository'].create({
        'project_id': project.id,
        'name': repo.name,
        'status_ids': [(0, 0, {'context': 'status'})],
    })
    setreviewers(*project.repo_ids)
    env['runbot_merge.events_sources'].create({'repository': repo.name})

    with repo:
        [m] = repo.make_commits(None, Commit('root', tree={'a': '1'}), ref='heads/master')
        [ot] = repo.make_commits(m, Commit('other', tree={'b': '1'}), ref='heads/other')

        [c] = repo.make_commits(ot, Commit('wheee', tree={'b': '2'}))
        pr = repo.make_pr(title="title", body='body', target='other', head=c)
    env.run_crons()
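
    # `other` exists as a git branch but is not configured on the project at
    # all, so the mergebot should ignore the PR entirely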

    assert not env['runbot_merge.pull_requests'].search([
        ('repository', '=', repo_id.id),
        ('number', '=', pr.number),
    ]), "the PR should not have been created in the backend"
    assert pr.comments == [
        (users['user'], "This PR targets the un-managed branch %s:other, it needs to be retargeted before it can be merged." % repo.name),
    ]


def test_new_pr_disabled_branch(env, project, make_repo, setreviewers, users):
    """ A new PR to a *disabled* branch should be accepted (rather than
    ignored), but with a warning
    """
    repo = make_repo('repo')
    repo_id = env['runbot_merge.repository'].create({
        'project_id': project.id,
        'name': repo.name,
        'status_ids': [(0, 0, {'context': 'status'})],
    })
    env['runbot_merge.branch'].create({
        'project_id': project.id,
        'name': 'other',
        'active': False,
    })
    setreviewers(*project.repo_ids)
    env['runbot_merge.events_sources'].create({'repository': repo.name})

    with repo:
        [m] = repo.make_commits(None, Commit('root', tree={'a': '1'}), ref='heads/master')
        [ot] = repo.make_commits(m, Commit('other', tree={'b': '1'}), ref='heads/other')

        [c] = repo.make_commits(ot, Commit('wheee', tree={'b': '2'}))
        pr = repo.make_pr(title="title", body='body', target='other', head=c)
    env.run_crons()
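
    # unlike the unknown-branch case, a disabled branch is still *known* to
    # the project, so the PR should be registered, just warned about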

    pr_id = env['runbot_merge.pull_requests'].search([
        ('repository', '=', repo_id.id),
        ('number', '=', pr.number),
    ])
    assert pr_id, "the PR should have been created in the backend"
    assert pr_id.state == 'opened'
    assert pr.comments == [
        (users['user'], "This PR targets the disabled branch %s:other, it needs to be retargeted before it can be merged." % repo.name),
        seen(env, pr, users),
    ]