diff --git a/forwardport/__init__.py b/forwardport/__init__.py
new file mode 100644
index 00000000..0650744f
--- /dev/null
+++ b/forwardport/__init__.py
@@ -0,0 +1 @@
+from . import models
diff --git a/forwardport/__manifest__.py b/forwardport/__manifest__.py
new file mode 100644
index 00000000..c91fe427
--- /dev/null
+++ b/forwardport/__manifest__.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+{
+    'name': 'forward port bot',
+    'summary': "A bot which forward-ports successful PRs.",
+    'depends': ['runbot_merge'],
+    'data': [
+        'data/security.xml',
+        'data/crons.xml',
+        'data/views.xml',
+    ],
+}
diff --git a/forwardport/data/crons.xml b/forwardport/data/crons.xml
new file mode 100644
index 00000000..426e5c71
--- /dev/null
+++ b/forwardport/data/crons.xml
@@ -0,0 +1,46 @@
+<odoo>
+    <record model="ir.cron" id="port_forward">
+        <field name="name">Check if there are merged PRs to port</field>
+        <field name="model_id" ref="model_forwardport_batches"/>
+        <field name="state">code</field>
+        <field name="code">model._process()</field>
+        <field name="interval_number">1</field>
+        <field name="interval_type">minutes</field>
+        <field name="numbercall">-1</field>
+        <field name="doall" eval="False"/>
+    </record>
+
+    <record model="ir.cron" id="updates">
+        <field name="name">Update followup FP PRs</field>
+        <field name="model_id" ref="model_forwardport_updates"/>
+        <field name="state">code</field>
+        <field name="code">model._process()</field>
+        <field name="interval_number">1</field>
+        <field name="interval_type">minutes</field>
+        <field name="numbercall">-1</field>
+        <field name="doall" eval="False"/>
+    </record>
+
+    <record model="ir.cron" id="reminder">
+        <field name="name">Remind open PR</field>
+        <field name="model_id" ref="runbot_merge.model_runbot_merge_pull_requests"/>
+        <field name="state">code</field>
+        <field name="code"><![CDATA[
+for pr in env['runbot_merge.pull_requests'].search([
+    # only FP PRs
+    ('source_id', '!=', False),
+    # active
+    ('state', 'not in', ['merged', 'closed']),
+]):
+    env['runbot_merge.pull_requests.feedback'].create({
+        'repository': pr.repository.id,
+        'pull_request': pr.number,
+        'message': "This forward port PR is awaiting action",
+    })
+]]></field>
+        <field name="interval_number">1</field>
+        <field name="interval_type">weeks</field>
+        <field name="numbercall">-1</field>
+        <field name="doall" eval="False"/>
+    </record>
+</odoo>
diff --git a/forwardport/data/security.xml b/forwardport/data/security.xml
new file mode 100644
index 00000000..3f7b8248
--- /dev/null
+++ b/forwardport/data/security.xml
@@ -0,0 +1,37 @@
+<odoo>
+    <record model="ir.model.access" id="access_forwardport_batches_admin">
+        <field name="name">Admin access to batches</field>
+        <field name="model_id" ref="model_forwardport_batches"/>
+        <field name="group_id" ref="base.group_system"/>
+        <field name="perm_read">1</field>
+        <field name="perm_create">1</field>
+        <field name="perm_write">1</field>
+        <field name="perm_unlink">1</field>
+    </record>
+    <record model="ir.model.access" id="access_forwardport_updates_admin">
+        <field name="name">Admin access to updates</field>
+        <field name="model_id" ref="model_forwardport_updates"/>
+        <field name="group_id" ref="base.group_system"/>
+        <field name="perm_read">1</field>
+        <field name="perm_create">1</field>
+        <field name="perm_write">1</field>
+        <field name="perm_unlink">1</field>
+    </record>
+
+    <record model="ir.model.access" id="access_forwardport_batches">
+        <field name="name">No normal access to batches</field>
+        <field name="model_id" ref="model_forwardport_batches"/>
+        <field name="perm_read">0</field>
+        <field name="perm_create">0</field>
+        <field name="perm_write">0</field>
+        <field name="perm_unlink">0</field>
+    </record>
+    <record model="ir.model.access" id="access_forwardport_updates">
+        <field name="name">No normal access to updates</field>
+        <field name="model_id" ref="model_forwardport_updates"/>
+        <field name="perm_read">0</field>
+        <field name="perm_create">0</field>
+        <field name="perm_write">0</field>
+        <field name="perm_unlink">0</field>
+    </record>
+</odoo>
diff --git a/forwardport/data/views.xml b/forwardport/data/views.xml
new file mode 100644
index 00000000..9084c7ee
--- /dev/null
+++ b/forwardport/data/views.xml
@@ -0,0 +1,44 @@
+<odoo>
+    <record id="project_form" model="ir.ui.view">
+        <field name="name">Show forwardport project fields</field>
+        <field name="model">runbot_merge.project</field>
+        <field name="inherit_id" ref="runbot_merge.runbot_merge_form_project"/>
+        <field name="arch" type="xml">
+            <xpath expr="//sheet/group" position="after">
+                <group string="Forward-port">
+                    <group>
+                        <field name="fp_github_token"/>
+                    </group>
+                    <group>
+                        <field name="fp_github_name"/>
+                        <field name="fp_github_email"/>
+                    </group>
+                </group>
+            </xpath>
+            <xpath expr="//field[@name='repo_ids']/tree" position="inside">
+                <field name="fp_remote_target"/>
+            </xpath>
+            <xpath expr="//field[@name='branch_ids']/tree" position="inside">
+                <field name="fp_sequence"/>
+                <field name="fp_target"/>
+            </xpath>
+        </field>
+    </record>
+
+    <record id="pr_form" model="ir.ui.view">
+        <field name="name">Show forwardport PR fields</field>
+        <field name="model">runbot_merge.pull_requests</field>
+        <field name="inherit_id" ref="runbot_merge.runbot_merge_form_prs"/>
+        <field name="arch" type="xml">
+            <xpath expr="//field[@name='state']" position="after">
+                <field name="source_id"/>
+                <field name="parent_id"/>
+                <field name="limit_id"/>
+                <p attrs="{'invisible': ['|', ('source_id', '=', False), ('parent_id', '!=', False)]}">
+                    Detached from forward porting (either conflicting
+                    or explicitly updated).
+                </p>
+            </xpath>
+        </field>
+    </record>
+</odoo>
diff --git a/forwardport/models/__init__.py b/forwardport/models/__init__.py
new file mode 100644
index 00000000..f3fb02c7
--- /dev/null
+++ b/forwardport/models/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+from . import project
+from . import forwardport
diff --git a/forwardport/models/forwardport.py b/forwardport/models/forwardport.py
new file mode 100644
index 00000000..116e615f
--- /dev/null
+++ b/forwardport/models/forwardport.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+import logging
+from contextlib import ExitStack
+
+import subprocess
+
+from odoo import fields, models
+
+
+_logger = logging.getLogger(__name__)
+
+class Queue:
+    def _process_item(self):
+        raise NotImplementedError
+
+    def _process(self):
+        while True:
+            b = self.search([], limit=1)
+            if not b:
+                return
+
+            b._process_item()
+
+            b.unlink()
+            self.env.cr.commit()
+
+
+class BatchQueue(models.Model, Queue):
+    _name = 'forwardport.batches'
+    _description = 'batches which got merged and are candidates for forward-porting'
+
+    batch_id = fields.Many2one('runbot_merge.batch', required=True)
+    source = fields.Selection([
+        ('merge', 'Merge'),
+        ('fp', 'Forward Port Followup'),
+    ], required=True)
+
+    def _process_item(self):
+        batch = self.batch_id
+
+        # only some prs of the batch have a parent, that's weird
+        with_parent = batch.prs.filtered(lambda p: p.parent_id)
+        if with_parent and with_parent != batch.prs:
+            _logger.warn("Found a subset of batch %s (%s) with parents: %s, should probably investigate (normally either they're all parented or none are)", batch, batch.prs, with_parent)
+
+        newbatch = batch.prs._port_forward()
+        if newbatch:
+            _logger.info(
+                "Processing %s (from %s): %s (%s) -> %s (%s)",
+                self.id, self.source,
+                batch, batch.prs,
+                newbatch, newbatch.prs,
+            )
+        else: # reached end of seq (or batch is empty)
+            # FIXME: or configuration is fucky so doesn't want to FP (maybe should error and retry?)
+            _logger.info(
+                "Processing %s (from %s): %s (%s) -> end of the sequence",
+                self.id, self.source,
+                batch, batch.prs
+            )
+        batch.active = False
+
+class UpdateQueue(models.Model, Queue):
+    _name = 'forwardport.updates'
+    _description = 'if a forward-port PR gets updated & has followups (cherrypick succeeded) the followups need to be updated as well'
+
+    original_root = fields.Many2one('runbot_merge.pull_requests')
+    new_root = fields.Many2one('runbot_merge.pull_requests')
+
+    def _process_item(self):
+        previous = self.new_root
+        with ExitStack() as s:
+            for child in self.new_root._iter_descendants():
+                # QUESTION: update PR to draft if there are conflicts?
+                _, working_copy = previous._create_fp_branch(
+                    child.target, child.refname, s)
+
+                new_head = working_copy.stdout().rev_parse(child.refname).stdout.decode().strip()
+                # update child's head to the head we're going to push
+                child.with_context(ignore_head_update=True).head = new_head
+                working_copy.push('-f', 'target', child.refname)
+                # committing here means github could technically trigger its
+                # webhook before sending a response, but committing before
+                # would mean we can update the PR in database but fail to
+                # update on github, which is probably worse?
+                # alternatively we can commit, push, and rollback if the push
+                # fails
+                # FIXME: handle failures (especially on non-first update)
+                self.env.cr.commit()
+
+                previous = child
diff --git a/forwardport/models/project.py b/forwardport/models/project.py
new file mode 100644
index 00000000..e868fc47
--- /dev/null
+++ b/forwardport/models/project.py
@@ -0,0 +1,689 @@
+# -*- coding: utf-8 -*-
+"""
+Technically could be independent from mergebot but would require a lot of
+duplicate work e.g. keeping track of statuses (including on commits which
+might not be in PRs yet), handling co-dependent PRs, ...
+
+However, extending the mergebot also leads to messiness: fpbot should have
+its own user / feedback / API keys; mergebot and fpbot both have branch
+ordering but for mergebot it's completely cosmetic; and being slaved to the
+mergebot makes PR creation trickier (as mergebot assumes an opened event
+will always lead to PR creation, but fpbot wants to attach meaning to the PR
+when setting it up), ...
+"""
+import base64
+import contextlib
+import itertools
+import json
+import logging
+import os
+import pathlib
+import subprocess
+import tempfile
+
+import re
+import requests
+
+from odoo.exceptions import UserError
+from odoo.tools.appdirs import user_cache_dir
+
+from odoo import _, models, fields, api
+from odoo.addons.runbot_merge import utils
+from odoo.addons.runbot_merge.models.pull_requests import RPLUS
+
+
+_logger = logging.getLogger('odoo.addons.forwardport')
+
+class Project(models.Model):
+ _inherit = 'runbot_merge.project'
+
+ fp_github_token = fields.Char()
+ fp_github_name = fields.Char(store=True, compute="_compute_git_identity")
+ fp_github_email = fields.Char(store=True, compute="_compute_git_identity")
+
+    def _find_commands(self, comment):
+        if self.env.context.get('without_forward_port'):
+            return super()._find_commands(comment)
+
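+        # commands are comment lines addressed to the fp bot, e.g. with
+        # fp_github_name "fp-bot" the line "@fp-bot up to b" yields the
+        # command text "up to b" ("fp-bot" being purely illustrative)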
+        return re.findall(
+            r'^\s*[@#]?{}:? (.*)$'.format(self.fp_github_name),
+            comment, re.MULTILINE | re.IGNORECASE
+        ) + super()._find_commands(comment)
+
+    # technically the email could change at any moment...
+    @api.depends('fp_github_token')
+    def _compute_git_identity(self):
+        s = requests.Session()
+        for project in self:
+            if not project.fp_github_token:
+                continue
+            r0 = s.get('https://api.github.com/user', headers={
+                'Authorization': 'token %s' % project.fp_github_token
+            })
+            if not r0.ok:
+                _logger.warn("Failed to fetch bot information for project %s: %s", project.name, r0.text or r0.content)
+                continue
+            if 'user:email' not in set(re.split(r',\s*', r0.headers['x-oauth-scopes'])):
+                raise UserError(_("The forward-port github token needs the user:email scope to fetch the bot's identity."))
+            r1 = s.get('https://api.github.com/user/emails', headers={
+                'Authorization': 'token %s' % project.fp_github_token
+            })
+            if not r1.ok:
+                _logger.warn("Failed to fetch bot emails for project %s: %s", project.name, r1.text or r1.content)
+                continue
+            project.fp_github_name = r0.json()['login']
+            project.fp_github_email = next((
+                entry['email']
+                for entry in r1.json()
+                if entry['primary']
+            ), None)
+            if not project.fp_github_email:
+                raise UserError(_("The forward-port bot needs a primary email set up."))
+
+
+class Repository(models.Model):
+ _inherit = 'runbot_merge.repository'
+ fp_remote_target = fields.Char(help="where FP branches get pushed")
+
+class Branch(models.Model):
+ _inherit = 'runbot_merge.branch'
+
+ fp_sequence = fields.Integer(default=50)
+ fp_target = fields.Boolean(default=False)
+ fp_enabled = fields.Boolean(compute='_compute_fp_enabled')
+
+ @api.depends('active', 'fp_target')
+ def _compute_fp_enabled(self):
+ for b in self:
+ b.fp_enabled = b.active and b.fp_target
+
+ # FIXME: this should be per-project...
+ def _forward_port_ordered(self):
+ """ Returns all branches in forward port order (from the lowest to
+ the highest — usually master)
+ """
+ return self.search([], order=self._forward_port_ordering())
+
+ def _forward_port_ordering(self):
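+        # invert each ordering term (stripping or appending ' desc') so the
+        # search yields branches from the lowest up to the highest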
+ return ','.join(
+ f[:-5] if f.lower().endswith(' desc') else f + ' desc'
+ for f in re.split(r',\s*', 'fp_sequence,' + self._order)
+ )
+
+class PullRequests(models.Model):
+ _inherit = 'runbot_merge.pull_requests'
+
+ # TODO: delete remote branches of merged FP PRs
+
+ # QUESTION: should the limit be copied on each child, or should it be inferred from the parent? Also what happens when detaching, is the detached PR configured independently?
+ # QUESTION: what happens if the limit_id is deactivated with extant PRs?
+ limit_id = fields.Many2one(
+ 'runbot_merge.branch',
+ default=lambda self: self.env['runbot_merge.branch']._forward_port_ordered()[-1],
+ help="Up to which branch should this PR be forward-ported"
+ )
+
+ parent_id = fields.Many2one(
+ 'runbot_merge.pull_requests', index=True,
+ help="a PR with a parent is an automatic forward port"
+ )
+ source_id = fields.Many2one('runbot_merge.pull_requests', index=True, help="the original source of this FP even if parents were detached along the way")
+
+ refname = fields.Char(compute='_compute_refname')
+ @api.depends('label')
+ def _compute_refname(self):
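+        # PR labels are "owner:branch-name"; refname keeps only the branch part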
+ for pr in self:
+ pr.refname = pr.label.split(':', 1)[-1]
+
+ def create(self, vals):
+ # PR opened event always creates a new PR, override so we can precreate PRs
+ existing = self.search([
+ ('repository', '=', vals['repository']),
+ ('number', '=', vals['number']),
+ ])
+ if existing:
+ return existing
+
+ if vals.get('parent_id') and 'source_id' not in vals:
+ vals['source_id'] = self.browse(vals['parent_id'])._get_root().id
+ return super().create(vals)
+
+ def write(self, vals):
+ # if the PR's head is updated, detach (should split off the FP lines as this is not the original code)
+ # TODO: better way to do this? Especially because we don't want to
+ # recursively create updates
+ # also a bit odd to only handle updating 1 head at a time, but then
+ # again 2 PRs with same head is weird so...
+ newhead = vals.get('head')
+ if newhead and not self.env.context.get('ignore_head_update') and newhead != self.head:
+ vals.setdefault('parent_id', False)
+ # if any children, this is an FP PR being updated, enqueue
+ # updating children
+ if self.search_count([('parent_id', '=', self.id)]):
+ self.env['forwardport.updates'].create({
+ 'original_root': self._get_root().id,
+ 'new_root': self.id
+ })
+
+ if vals.get('parent_id') and 'source_id' not in vals:
+ vals['source_id'] = self.browse(vals['parent_id'])._get_root().id
+ return super().write(vals)
+
+ def _try_closing(self, by):
+ r = super()._try_closing(by)
+ if r:
+ self.parent_id = False
+ return r
+
+ def _parse_commands(self, author, comment, login):
+ super(PullRequests, self.with_context(without_forward_port=True))._parse_commands(author, comment, login)
+
+        tokens = [
+            token
+            for line in re.findall(r'^\s*[@#]?{}:? (.*)$'.format(self.repository.project_id.fp_github_name), comment, re.MULTILINE | re.IGNORECASE)
+            for token in line.split()
+        ]
+ if not tokens:
+ _logger.info("found no commands in comment of %s (%s) (%s)", author.github_login, author.display_name,
+ utils.shorten(comment, 50)
+ )
+ return
+
+ # TODO: don't use a mutable tokens iterator
+ tokens = iter(tokens)
+ for token in tokens:
+ if token in ('r+', 'review+') and self.source_id._pr_acl(author).is_reviewer:
+ # don't update the root ever
+ for pr in filter(lambda p: p.parent_id, self._iter_ancestors()):
+ newstate = RPLUS.get(pr.state)
+ if newstate:
+ pr.state = newstate
+ pr.reviewed_by = author
+ # TODO: logging & feedback
+ elif token == 'up' and next(tokens, None) == 'to' and self._pr_acl(author).is_reviewer:
+ limit = next(tokens, None)
+ if not limit:
+ msg = "Please provide a branch to forward-port to."
+ else:
+ limit_id = self.env['runbot_merge.branch'].with_context(active_test=False).search([
+ ('project_id', '=', self.repository.project_id.id),
+ ('name', '=', limit),
+ ])
+ if not limit_id:
+ msg = "There is no branch %r, it can't be used as a forward port target." % limit
+ elif not limit_id.fp_enabled:
+ msg = "Branch %r is disabled, it can't be used as a forward port target." % limit_id.name
+ else:
+ msg = "Forward-porting to %r." % limit_id.name
+ self.limit_id = limit_id
+
+ self.env['runbot_merge.pull_requests.feedback'].create({
+ 'repository': self.repository.id,
+ 'pull_request': self.number,
+ 'message': msg,
+ })
+
+ def _validate(self, statuses):
+ _logger = logging.getLogger(__name__).getChild('forwardport.next')
+ super()._validate(statuses)
+ # if the PR has a parent and is CI-validated, enqueue the next PR
+ for pr in self:
+ _logger.info('Checking if forward-port %s (%s)', pr, pr.number)
+ if not pr.parent_id or pr.state not in ('validated', 'ready'):
+ _logger.info('-> no parent or wrong state (%s)', pr.state)
+ continue
+            # if it already has a child, bail
+            if self.search_count([('parent_id', '=', pr.id)]):
+                _logger.info('-> already has a child')
+                continue
+
+ # check if we've already selected this PR's batch for forward
+ # porting and just haven't come around to it yet
+ batch = pr.batch_id
+ _logger.info("%s %s %s", pr, batch, batch.prs)
+ if self.env['forwardport.batches'].search_count([('batch_id', '=', batch.id)]):
+ _logger.warn('-> already recorded')
+ continue
+
+ # check if batch-mate are all valid
+ mates = batch.prs
+ # wait until all of them are validated or ready
+ if any(pr.state not in ('validated', 'ready') for pr in mates):
+ _logger.warn("-> not ready (%s)", [(pr.number, pr.state) for pr in mates])
+ continue
+
+ # check that there's no weird-ass state
+ if not all(pr.parent_id for pr in mates):
+ _logger.warn("Found a batch (%s) with only some PRs having parents, ignoring", mates)
+ continue
+ if self.search_count([('parent_id', 'in', mates.ids)]):
+ _logger.warn("Found a batch (%s) with only some of the PRs having children", mates)
+ continue
+
+ _logger.info('-> ok')
+ self.env['forwardport.batches'].create({
+ 'batch_id': batch.id,
+ 'source': 'fp',
+ })
+
+ def _forward_port_sequence(self):
+ # risk: we assume disabled branches are still at the correct location
+ # in the FP sequence (in case a PR was merged to a branch which is now
+ # disabled, could happen right around the end of the support window)
+ # (or maybe we can ignore this entirely and assume all relevant
+ # branches are active?)
+ fp_complete = self.env['runbot_merge.branch'].with_context(active_test=False)._forward_port_ordered()
+ candidates = iter(fp_complete)
+ for b in candidates:
+ if b == self.target:
+ break
+ # the candidates iterator is just past the current branch's target
+ for target in candidates:
+ if target.fp_enabled:
+ yield target
+ if target == self.limit_id:
+ break
+
+ def _find_next_target(self, reference):
+ """ Finds the branch between target and limit_id which follows
+ reference
+ """
+ if reference.target == self.limit_id:
+ return
+ # NOTE: assumes even disabled branches are properly sequenced, would
+ # probably be a good idea to have the FP view show all branches
+ branches = list(self.env['runbot_merge.branch'].with_context(active_test=False)._forward_port_ordered())
+
+ # get all branches between max(root.target, ref.target) (excluded) and limit (included)
+ from_ = max(branches.index(self.target), branches.index(reference.target))
+ to_ = branches.index(self.limit_id)
+
+ # return the first active branch in the set
+ return next((
+ branch
+ for branch in branches[from_+1:to_+1]
+ if branch.fp_enabled
+ ), None)
+
+ def _iter_descendants(self):
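+        # walk down the chain of forward-ports: each child references its
+        # predecessor via parent_id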
+ pr = self
+ while True:
+ pr = self.search([('parent_id', '=', pr.id)])
+ if pr:
+ yield pr
+ else:
+ break
+
+ def _iter_ancestors(self):
+ while self:
+ yield self
+ self = self.parent_id
+
+ def _get_root(self):
+ root = self
+ while root.parent_id:
+ root = root.parent_id
+ return root
+
+ def _port_forward(self):
+ if not self:
+ return
+
+ ref = self[0]
+
+ base = ref.source_id or ref._get_root()
+ target = base._find_next_target(ref)
+ if target is None:
+ _logger.info(
+ "Will not forward-port %s#%s: no next target",
+ ref.repository.name, ref.number,
+ )
+ return # QUESTION: do the prs need to be updated?
+
+ proj = self.mapped('target.project_id')
+ if not proj.fp_github_token:
+ _logger.warn(
+ "Can not forward-port %s#%s: no token on project %s",
+ ref.repository.name, ref.number,
+ proj.name
+ )
+ return
+
+ notarget = [p.repository.name for p in self if not p.repository.fp_remote_target]
+ if notarget:
+ _logger.warn(
+ "Can not forward-port %s: repos %s don't have a remote configured",
+ self, ', '.join(notarget)
+ )
+ return
+
+ # take only the branch bit
+ new_branch = '%s-%s-%s-forwardport' % (
+ target.name,
+ base.refname,
+ # avoid collisions between fp branches (labels can be reused
+ # or conflict especially as we're chopping off the owner)
+ base64.b32encode(os.urandom(5)).decode()
+ )
+ # TODO: send outputs to logging?
+ conflicts = {}
+ with contextlib.ExitStack() as s:
+ for pr in self:
+ conflicts[pr], working_copy = pr._create_fp_branch(
+ target, new_branch, s)
+
+ working_copy.push('target', new_branch)
+
+ has_conflicts = any(conflicts.values())
+        # problemo: this should forward-port a batch at a time; if porting
+        # one of the PRs in the batch fails, that's a huge problem. though this
+        # loop only concerns itself with the creation of the followup objects so...
+        new_batch = self.browse(())
+ for pr in self:
+ owner, _ = pr.repository.fp_remote_target.split('/', 1)
+ source = pr.source_id or pr
+ message = source.message
+ (h, out, err) = conflicts.get(pr) or (None, None, None)
+ if h:
+ message = """Cherrypicking %s of source #%d failed with the following
+
+stdout:
+```
+%s
+```
+
+stderr:
+```
+%s
+```
+
+Either perform the forward-port manually (and push to this branch, proceeding
+as usual) or close this PR (maybe?).
+
+In the former case, you may want to edit this PR message as well.
+""" % (h, source.number, out, err)
+
+ r = requests.post(
+ 'https://api.github.com/repos/{}/pulls'.format(pr.repository.name), json={
+ 'title': "Forward Port of #%d to %s%s" % (
+ source.number,
+ target.name,
+ ' (failed)' if has_conflicts else ''
+ ),
+                'body': message,
+ 'head': '%s:%s' % (owner, new_branch),
+ 'base': target.name,
+ 'draft': has_conflicts,
+ }, headers={
+ 'Accept': 'application/vnd.github.shadow-cat-preview+json',
+ 'Authorization': 'token %s' % pr.repository.project_id.fp_github_token,
+ }
+ )
+ assert 200 <= r.status_code < 300, r.json()
+ new_pr = self._from_gh(r.json())
+ _logger.info("Created forward-port PR %s", new_pr)
+ new_batch |= new_pr
+
+ new_pr.write({
+ 'merge_method': pr.merge_method,
+ 'source_id': source.id,
+ # only link to previous PR of sequence if cherrypick passed
+ 'parent_id': pr.id if not has_conflicts else False,
+ })
+ assignees = (new_pr.source_id.author | new_pr.source_id.reviewed_by).mapped('github_login')
+ self.env['runbot_merge.pull_requests.feedback'].create({
+ 'repository': new_pr.repository.id,
+ 'pull_request': new_pr.number,
+ 'message': "Ping %s" % ', '.join('@' + login for login in assignees if login),
+ })
+ # not great but we probably want to avoid the risk of the webhook
+ # creating the PR from under us. There's still a "hole" between
+ # the POST being executed on gh and the commit but...
+ self.env.cr.commit()
+
+        # batch the PRs so _validate can perform the followup FP properly
+        # (with the entire batch). If there are conflicts, create a
+        # deactivated batch so the interface is coherent but we don't pick
+        # up an active batch we're never going to deactivate.
+ return self.env['runbot_merge.batch'].create({
+ 'target': target.id,
+ 'prs': [(6, 0, new_batch.ids)],
+ 'active': not has_conflicts,
+ })
+
+ def _create_fp_branch(self, target_branch, fp_branch_name, cleanup):
+ """ Creates a forward-port for the current PR to ``target_branch`` under
+ ``fp_branch_name``.
+
+ :param target_branch: the branch to port forward to
+ :param fp_branch_name: the name of the branch to create the FP under
+ :param ExitStack cleanup: so the working directories can be cleaned up
+ :return: (conflictp, working_copy)
+ :rtype: (bool, Repo)
+ """
+ source = self._get_local_directory()
+ # update all the branches & PRs
+ source.with_params('gc.pruneExpire=1.day.ago').fetch('-p', 'origin')
+ # FIXME: check that pr.head is pull/{number}'s head instead?
+ source.cat_file(e=self.head)
+ # create working copy
+ working_copy = source.clone(
+ cleanup.enter_context(tempfile.TemporaryDirectory()),
+ branch=target_branch.name
+ )
+ project_id = self.repository.project_id
+ # configure local repo so commits automatically pickup bot identity
+ working_copy.config('--local', 'user.name', project_id.fp_github_name)
+ working_copy.config('--local', 'user.email', project_id.fp_github_email)
+ # add target remote
+ working_copy.remote(
+ 'add', 'target',
+ 'https://{p.fp_github_name}:{p.fp_github_token}@github.com/{r.fp_remote_target}'.format(
+ r=self.repository,
+ p=project_id
+ )
+ )
+ working_copy.checkout(b=fp_branch_name)
+
+ root = self._get_root()
+ try:
+ root._cherry_pick(working_copy)
+ except CherrypickError as e:
+ # apply the diff of the PR to the target
+ # in git diff, "A...B" selects the bits of B not in A
+ # (which is the other way around from gitrevisions(7)
+ # because git)
+ diff = working_copy.stdout().diff('%s...%s' % root._reference_branches()).stdout
+            # apply probably fails (because of the conflict), we don't care
+            working_copy.with_config(input=diff).apply('-3', '-')
+
+ working_copy.commit(
+ a=True, allow_empty=True,
+ message="""Cherry pick of %s failed
+
+stdout:
+%s
+stderr:
+%s
+""" % e.args)
+ return e.args, working_copy
+ else:
+ return None, working_copy
+
+ def _cherry_pick(self, working_copy):
+ """ Cherrypicks ``self`` into the working copy
+
+ :return: ``True`` if the cherrypick was successful, ``False`` otherwise
+ """
+        # dedicated sub-logger per PR: <module logger>.cherrypick.<pr number>
+        logger = _logger.getChild('cherrypick').getChild(str(self.number))
+ cmap = json.loads(self.commits_map)
+
+ # original head so we can reset
+ h = working_copy.stdout().rev_parse('HEAD').stdout
+
+ commits_range = self._reference_branches()
+ # need to cherry-pick commit by commit because `-n` doesn't yield
+ # back control on each op. `-e` with VISUAL=false kinda works but
+ # doesn't seem to provide a good way to know when we're done
+ # cherrypicking
+ rev_list = working_copy.lazy().stdout()\
+ .rev_list('--reverse', '%s..%s' % commits_range)
+ commits = list(rev_list.stdout)
+ logger.info("%d commit(s) on %s", len(commits), h.decode())
+ for commit in commits:
+ commit = commit.decode().strip()
+ r = working_copy.with_config(
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ check=False,
+ ).cherry_pick(commit)
+
+ if r.returncode: # pick failed, reset and bail
+ logger.info("%s: failed", commit)
+ working_copy.reset('--hard', h.decode().strip())
+ raise CherrypickError(commit, r.stdout.decode(), r.stderr.decode())
+
+ original_msg = working_copy.stdout().show('-s', '--format=%B', commit).stdout.decode()
+
+ msg = self._parse_commit_message(original_msg)
+
+ # original signed-off-er should be retained but didn't necessarily
+ # sign off here, so convert signed-off-by to something else
+ sob = msg.headers.getlist('signed-off-by')
+ if sob:
+ msg.headers.remove('signed-off-by')
+ msg.headers.extend(
+ ('original-signed-off-by', v)
+ for v in sob
+ )
+ # write the *merged* commit as "original", not the PR's
+ msg.headers['x-original-commit'] = cmap.get(commit, commit)
+
+ # replace existing commit message with massaged one
+ working_copy\
+ .with_config(input=str(msg).encode())\
+ .commit(amend=True, file='-')
+ new = working_copy.stdout().rev_parse('HEAD').stdout.decode()
+ logger.info('%s: success -> %s', commit, new)
+
+ def _reference_branches(self):
+ """
+ :returns: (PR target, PR branch)
+ """
+ ancestor_branch = 'origin/pull/%d' % self.number
+ source_target = 'origin/%s' % self.target.name
+ return source_target, ancestor_branch
+
+ def _get_local_directory(self):
+ repos_dir = pathlib.Path(user_cache_dir('forwardport'))
+ repos_dir.mkdir(parents=True, exist_ok=True)
+ repo_dir = repos_dir / self.repository.name
+
+ if repo_dir.is_dir():
+ return git(repo_dir)
+ else:
+ subprocess.run([
+ 'git', 'clone', '--bare',
+ 'https://{}:{}@github.com/{}'.format(
+ self.repository.project_id.fp_github_name,
+ self.repository.project_id.fp_github_token,
+ self.repository.name,
+ ),
+ str(repo_dir)
+ ], check=True)
+ # add PR branches as local but namespaced (?)
+ repo = git(repo_dir)
+ repo.config('--add', 'remote.origin.fetch', '+refs/pull/*/head:refs/heads/pull/*')
+ return repo
+
+class Stagings(models.Model):
+    _inherit = 'runbot_merge.stagings'
+
+    def write(self, vals):
+        r = super().write(vals)
+        if vals.get('active') is False and self.state == 'success':
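+            # a staging deactivated after succeeding means its batches just
+            # got merged, so they become candidates for forward-porting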
+            for b in self.with_context(active_test=False).batch_ids:
+                # if batch PRs have parents they're part of an FP sequence and
+                # thus handled separately
+                if not b.mapped('prs.parent_id'):
+                    self.env['forwardport.batches'].create({
+                        'batch_id': b.id,
+                        'source': 'merge',
+                    })
+        return r
+
+
+def git(directory): return Repo(directory, check=True)
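+# thin wrapper around the git CLI: attribute access builds a subcommand, so
+# repo.rev_parse('HEAD') runs `git -C <dir> rev-parse HEAD`, and keyword
+# arguments turn into flags (see GitCommand._to_options)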
+class Repo:
+    def __init__(self, directory, **config):
+        self._directory = str(directory)
+        self._config = config
+        self._params = ()
+        self._opener = subprocess.run
+
+    def __getattr__(self, name):
+        return GitCommand(self, name.replace('_', '-'))
+
+    def _run(self, *args, **kwargs):
+        opts = {**self._config, **kwargs}
+        return self._opener(
+            ('git', '-C', self._directory)
+            + tuple(itertools.chain.from_iterable(('-c', p) for p in self._params))
+            + args,
+            **opts
+        )
+
+    def stdout(self, flag=True):
+        if flag is True:
+            return self.with_config(stdout=subprocess.PIPE)
+        elif flag is False:
+            return self.with_config(stdout=None)
+        return self.with_config(stdout=flag)
+
+    def lazy(self):
+        r = self.with_config()
+        r._config.pop('check', None)
+        r._opener = subprocess.Popen
+        return r
+
+    def check(self, flag):
+        return self.with_config(check=flag)
+
+    def with_config(self, **kw):
+        opts = {**self._config, **kw}
+        r = Repo(self._directory, **opts)
+        r._opener = self._opener
+        r._params = self._params
+        return r
+
+    def with_params(self, *args):
+        r = self.with_config()
+        r._params = args
+        return r
+
+    def clone(self, to, branch=None):
+        self._run(
+            'clone', '-s',
+            *([] if branch is None else ['-b', branch]),
+            self._directory, to,
+        )
+        return Repo(to)
+
+class GitCommand:
+    def __init__(self, repo, name):
+        self._name = name
+        self._repo = repo
+
+    def __call__(self, *args, **kwargs):
+        return self._repo._run(self._name, *args, *self._to_options(kwargs))
+
+    def _to_options(self, d):
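+        # map keyword arguments to CLI flags: single-letter keys become short
+        # options (b='x' -> ['-b', 'x']), longer keys long options with
+        # underscores dashed (allow_empty=True -> ['--allow-empty'])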
+        for k, v in d.items():
+            if len(k) == 1:
+                yield '-' + k
+            else:
+                yield '--' + k.replace('_', '-')
+            if v not in (None, True):
+                assert v is not False
+                yield str(v)
+
+class CherrypickError(Exception):
+    ...
diff --git a/forwardport/tests/conftest.py b/forwardport/tests/conftest.py
new file mode 100644
index 00000000..dbd5ba26
--- /dev/null
+++ b/forwardport/tests/conftest.py
@@ -0,0 +1,701 @@
+# -*- coding: utf-8 -*-
+import base64
+import collections
+import copy
+import itertools
+import logging
+import pathlib
+import socket
+import time
+import uuid
+import xmlrpc.client
+from contextlib import closing
+
+import pytest
+import subprocess
+
+import re
+import requests
+from shutil import rmtree
+
+from odoo.tools.appdirs import user_cache_dir
+
+DEFAULT_CRONS = [
+ 'runbot_merge.process_updated_commits',
+ 'runbot_merge.merge_cron',
+ 'forwardport.port_forward',
+ 'forwardport.updates',
+ 'runbot_merge.check_linked_prs_status',
+ 'runbot_merge.feedback_cron',
+]
+
+
+def pytest_report_header(config):
+ return 'Running against database ' + config.getoption('--db')
+
+def pytest_addoption(parser):
+ parser.addoption('--db', help="DB to run the tests against", default=str(uuid.uuid4()))
+ parser.addoption('--addons-path')
+ parser.addoption("--no-delete", action="store_true", help="Don't delete repo after a failed run")
+
+def wait_for_hook(n=1):
+ time.sleep(10 * n)
+
+def wait_for_server(db, port, proc, mod, timeout=120):
+ """ Polls for server to be response & have installed our module.
+
+ Raises socket.timeout on failure
+ """
+ limit = time.time() + timeout
+ while True:
+ if proc.poll() is not None:
+ raise Exception("Server unexpectedly closed")
+
+ try:
+ uid = xmlrpc.client.ServerProxy(
+ 'http://localhost:{}/xmlrpc/2/common'.format(port))\
+ .authenticate(db, 'admin', 'admin', {})
+ mods = xmlrpc.client.ServerProxy(
+ 'http://localhost:{}/xmlrpc/2/object'.format(port))\
+ .execute_kw(
+ db, uid, 'admin', 'ir.module.module', 'search_read', [
+ [('name', '=', mod)], ['state']
+ ])
+ if mods and mods[0].get('state') == 'installed':
+ break
+ except ConnectionRefusedError:
+ if time.time() > limit:
+ raise socket.timeout()
+
+# public_repo — necessary to leave comments
+# admin:repo_hook — to set up hooks (duh)
+# delete_repo — to cleanup repos created under a user
+# user:email — fetch token/user's email addresses
+TOKEN_SCOPES = {
+ 'github': {'admin:repo_hook', 'delete_repo', 'public_repo', 'user:email'},
+ # TODO: user:email so they can fetch the user's email?
+ 'role_reviewer': {'public_repo'},# 'delete_repo'},
+ 'role_self_reviewer': {'public_repo'},# 'delete_repo'},
+ 'role_other': {'public_repo'},# 'delete_repo'},
+}
+@pytest.fixture(autouse=True, scope='session')
+def _check_scopes(config):
+ for section, vals in config.items():
+ required_scopes = TOKEN_SCOPES.get(section)
+ if required_scopes is None:
+ continue
+
+ response = requests.get('https://api.github.com/rate_limit', headers={
+ 'Authorization': 'token %s' % vals['token']
+ })
+ assert response.status_code == 200
+ x_oauth_scopes = response.headers['X-OAuth-Scopes']
+ token_scopes = set(re.split(r',\s+', x_oauth_scopes))
+ assert token_scopes >= required_scopes, \
+ "%s should have scopes %s, found %s" % (section, token_scopes, required_scopes)
+
+@pytest.fixture(autouse=True)
+def _cleanup_cache(config, users):
+ """ forwardport has a repo cache which it assumes is unique per name
+ but tests always use the same repo paths / names for different repos
+ (the repos get re-created), leading to divergent repo histories.
+
+ So clear cache after each test, two tests should not share repos.
+ """
+ yield
+ cache_root = pathlib.Path(user_cache_dir('forwardport'))
+ rmtree(cache_root / config['github']['owner'], ignore_errors=True)
+ for login in users.values():
+ rmtree(cache_root / login, ignore_errors=True)
+
+@pytest.fixture(autouse=True)
+def users(users_):
+ return users_
+
+@pytest.fixture
+def project(env, config):
+ return env['runbot_merge.project'].create({
+ 'name': 'odoo',
+ 'github_token': config['github']['token'],
+ 'github_prefix': 'hansen',
+ 'fp_github_token': config['github']['token'],
+ 'required_statuses': 'legal/cla,ci/runbot',
+ })
+
+@pytest.fixture(scope='session')
+def module():
+ """ When a test function is (going to be) run, selects the containing
+ module (as needing to be installed)
+ """
+ # NOTE: no request.fspath (because no request.function) in session-scoped fixture so can't put module() at the toplevel
+ return 'forwardport'
+
+@pytest.fixture(scope='session')
+def port():
+ with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
+ s.bind(('', 0))
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ return s.getsockname()[1]
+
+@pytest.fixture(scope='session')
+def dbcache(request, module):
+ """ Creates template DB once per run, then just duplicates it before
+ starting odoo and running the testcase
+ """
+ db = request.config.getoption('--db')
+ subprocess.run([
+ 'odoo', '--no-http',
+ '--addons-path', request.config.getoption('--addons-path'),
+ '-d', db, '-i', module,
+ '--max-cron-threads', '0',
+ '--stop-after-init'
+ ], check=True)
+ yield db
+ subprocess.run(['dropdb', db])
+
+@pytest.fixture
+def db(request, dbcache):
+ rundb = str(uuid.uuid4())
+ subprocess.run(['createdb', '-T', dbcache, rundb], check=True)
+
+ yield rundb
+
+ if not request.config.getoption('--no-delete'):
+ subprocess.run(['dropdb', rundb], check=True)
+
+@pytest.fixture
+def server(request, db, port, module):
+ p = subprocess.Popen([
+ 'odoo', '--http-port', str(port),
+ '--addons-path', request.config.getoption('--addons-path'),
+ '-d', db,
+ '--max-cron-threads', '0', # disable cron threads (we're running crons by hand)
+ ])
+
+ try:
+ wait_for_server(db, port, p, module)
+
+ yield p
+ finally:
+ p.terminate()
+ p.wait(timeout=30)
+
+@pytest.fixture
+def env(port, server, db):
+ yield Environment(port, db)
+
+# users is just so I can avoid autouse on toplevel users fixture b/c it (seems
+# to) break the existing local tests
+@pytest.fixture
+def make_repo(request, config, tunnel, users):
+ owner = config['github']['owner']
+ github = requests.Session()
+ github.headers['Authorization'] = 'token %s' % config['github']['token']
+
+ # check whether "owner" is a user or an org, as repo-creation endpoint is
+ # different
+ q = github.get('https://api.github.com/users/{}'.format(owner))
+ q.raise_for_status()
+ if q.json().get('type') == 'Organization':
+ endpoint = 'https://api.github.com/orgs/{}/repos'.format(owner)
+ else:
+ endpoint = 'https://api.github.com/user/repos'
+ r = github.get('https://api.github.com/user')
+ r.raise_for_status()
+ assert r.json()['login'] == owner
+
+ repos = []
+ def repomaker(name):
+ fullname = '{}/{}'.format(owner, name)
+ repo_url = 'https://api.github.com/repos/{}'.format(fullname)
+ if request.config.getoption('--no-delete'):
+ if github.head(repo_url).ok:
+ pytest.skip("Repository {} already exists".format(fullname))
+ else:
+ # just try to delete the repo, we don't really care
+ if github.delete(repo_url).ok:
+ # if we did delete a repo, wait a bit as gh might need to
+ # propagate the thing?
+ time.sleep(30)
+
+ # create repo
+ r = github.post(endpoint, json={
+ 'name': name,
+ 'has_issues': False,
+ 'has_projects': False,
+ 'has_wiki': False,
+ 'auto_init': False,
+ # at least one merge method must be enabled :(
+ 'allow_squash_merge': False,
+ # 'allow_merge_commit': False,
+ 'allow_rebase_merge': False,
+ })
+ r.raise_for_status()
+
+ new_repo = Repo(github, fullname, repos)
+ # create webhook
+ github.post('{}/hooks'.format(repo_url), json={
+ 'name': 'web',
+ 'config': {
+ 'url': '{}/runbot_merge/hooks'.format(tunnel),
+ 'content_type': 'json',
+ 'insecure_ssl': '1',
+ },
+ 'events': ['pull_request', 'issue_comment', 'status', 'pull_request_review']
+ })
+
+ github.put('https://api.github.com/repos/{}/contents/{}'.format(fullname, 'a'), json={
+ 'path': 'a',
+ 'message': 'github returns a 409 (Git Repository is Empty) if trying to create a tree in a repo with no objects',
+ 'content': base64.b64encode(b'whee').decode('ascii'),
+ 'branch': 'garbage_%s' % uuid.uuid4()
+ }).raise_for_status()
+
+ return new_repo
+
+ yield repomaker
+
+ if not request.config.getoption('--no-delete'):
+ for repo in reversed(repos):
+ repo.delete()
+
+Commit = collections.namedtuple('Commit', 'id tree message author committer parents')
+class Repo:
+ def __init__(self, session, fullname, repos):
+ self._session = session
+ self.name = fullname
+ self._repos = repos
+ self.hook = False
+ repos.append(self)
+
+ # unwatch repo
+ self.unsubscribe()
+
+ def unsubscribe(self, token=None):
+ self._get_session(token).put('https://api.github.com/repos/{}/subscription'.format(self.name), json={
+ 'subscribed': False,
+ 'ignored': True,
+ })
+
+ def delete(self):
+ r = self._session.delete('https://api.github.com/repos/{}'.format(self.name))
+ if r.status_code != 204:
+ logging.getLogger(__name__).warn("Unable to delete repository %s", self.name)
+
+ def commit(self, ref):
+ if not re.match(r'[0-9a-f]{40}', ref):
+ if not ref.startswith(('heads/', 'refs/heads/')):
+ ref = 'refs/heads/' + ref
+ # apparently heads/ ~ refs/heads/ but are not
+ # necessarily up to date ??? unlike the git ref system where :ref
+ # starts at heads/
+ if ref.startswith('heads/'):
+ ref = 'refs/' + ref
+
+ r = self._session.get('https://api.github.com/repos/{}/commits/{}'.format(self.name, ref))
+ response = r.json()
+ assert 200 <= r.status_code < 300, response
+
+ return self._commit_from_gh(response)
+
+ def _commit_from_gh(self, gh_commit):
+ c = gh_commit['commit']
+ return Commit(
+ id=gh_commit['sha'],
+ tree=c['tree']['sha'],
+ message=c['message'],
+ author=c['author'],
+ committer=c['committer'],
+ parents=[p['sha'] for p in gh_commit['parents']],
+ )
+
+ def log(self, ref_or_sha):
+ for page in itertools.count(1):
+ r = self._session.get(
+ 'https://api.github.com/repos/{}/commits'.format(self.name),
+ params={'sha': ref_or_sha, 'page': page}
+ )
+ assert 200 <= r.status_code < 300, r.json()
+ yield from map(self._commit_from_gh, r.json())
+ if not r.links.get('next'):
+ return
+
+ def read_tree(self, commit):
+ """ read tree object from commit
+
+ :param Commit commit:
+ :rtype: Dict[str, str]
+ """
+ r = self._session.get('https://api.github.com/repos/{}/git/trees/{}'.format(self.name, commit.tree))
+ assert 200 <= r.status_code < 300, r.json()
+
+ # read tree's blobs
+ tree = {}
+ for t in r.json()['tree']:
+ assert t['type'] == 'blob', "we're *not* doing recursive trees in test cases"
+ r = self._session.get('https://api.github.com/repos/{}/git/blobs/{}'.format(self.name, t['sha']))
+ assert 200 <= r.status_code < 300, r.json()
+ tree[t['path']] = base64.b64decode(r.json()['content']).decode()
+
+ return tree
+
+ def make_ref(self, name, commit, force=False):
+ assert self.hook
+ assert name.startswith('heads/')
+ r = self._session.post('https://api.github.com/repos/{}/git/refs'.format(self.name), json={
+ 'ref': 'refs/' + name,
+ 'sha': commit,
+ })
+ if force and r.status_code == 422:
+ self.update_ref(name, commit, force=True)
+ return
+ assert 200 <= r.status_code < 300, r.json()
+
+ def update_ref(self, name, commit, force=False):
+ r = self._session.patch('https://api.github.com/repos/{}/git/refs/{}'.format(self.name, name), json={'sha': commit, 'force': force})
+ assert 200 <= r.status_code < 300, r.json()
+
+ def make_commits(self, root, *commits, ref=None):
+ assert self.hook
+ if root:
+ c = self.commit(root)
+ tree = c.tree
+ parents = [c.id]
+ else:
+ tree = None
+ parents = []
+
+ hashes = []
+ for commit in commits:
+ r = self._session.post('https://api.github.com/repos/{}/git/trees'.format(self.name), json={
+ 'tree': [
+ {'path': k, 'mode': '100644', 'type': 'blob', 'content': v}
+ for k, v in commit.tree.items()
+ ],
+ 'base_tree': tree
+ })
+ assert 200 <= r.status_code < 300, r.json()
+ tree = r.json()['sha']
+
+ data = {
+ 'parents': parents,
+ 'message': commit.message,
+ 'tree': tree,
+ }
+ if commit.author:
+ data['author'] = commit.author
+ if commit.committer:
+ data['committer'] = commit.committer
+
+ r = self._session.post('https://api.github.com/repos/{}/git/commits'.format(self.name), json=data)
+ assert 200 <= r.status_code < 300, r.json()
+
+ hashes.append(r.json()['sha'])
+ parents = [hashes[-1]]
+
+ if ref:
+ self.make_ref(ref, hashes[-1], force=True)
+
+ return hashes
+
+ def fork(self, *, token=None):
+ s = self._get_session(token)
+
+ r = s.post('https://api.github.com/repos/{}/forks'.format(self.name))
+ assert 200 <= r.status_code < 300, r.json()
+
+ repo_name = r.json()['full_name']
+ repo_url = 'https://api.github.com/repos/' + repo_name
+ # poll for end of fork
+ limit = time.time() + 60
+ while s.head(repo_url, timeout=5).status_code != 200:
+ if time.time() > limit:
+ raise TimeoutError("No response for repo %s over 60s" % repo_name)
+ time.sleep(1)
+
+ return Repo(s, repo_name, self._repos)
+
+ def _get_session(self, token):
+ s = self._session
+ if token:
+ s = requests.Session()
+ s.headers['Authorization'] = 'token %s' % token
+ return s
+
+ def get_pr(self, number):
+ # ensure PR exists before returning it
+ self._session.head('https://api.github.com/repos/{}/pulls/{}'.format(
+ self.name,
+ number,
+ )).raise_for_status()
+ return PR(self, number)
+
+ def make_pr(self, *, title=None, body=None, target, head, token=None):
+ assert self.hook
+ self.hook = 2
+
+ if title is None:
+ assert ":" not in head, \
+ "will not auto-infer titles for PRs in a remote repo"
+ c = self.commit(head)
+ parts = iter(c.message.split('\n\n', 1))
+ title = next(parts)
+ body = next(parts, None)
+
+ headers = {}
+ if token:
+ headers['Authorization'] = 'token {}'.format(token)
+
+ r = self._session.post(
+ 'https://api.github.com/repos/{}/pulls'.format(self.name),
+ json={
+ 'title': title,
+ 'body': body,
+ 'head': head,
+ 'base': target,
+ },
+ headers=headers,
+ )
+ pr = r.json()
+ assert 200 <= r.status_code < 300, pr
+
+ return PR(self, pr['number'])
+
+ def post_status(self, ref, status, context='default', **kw):
+ assert self.hook
+ assert status in ('error', 'failure', 'pending', 'success')
+ r = self._session.post('https://api.github.com/repos/{}/statuses/{}'.format(self.name, self.commit(ref).id), json={
+ 'state': status,
+ 'context': context,
+ **kw
+ })
+ assert 200 <= r.status_code < 300, r.json()
+
+ def __enter__(self):
+ self.hook = 1
+ return self
+ def __exit__(self, *args):
+ wait_for_hook(self.hook)
+ self.hook = 0
+
+class PR:
+ __slots__ = ['number', 'repo']
+
+ def __init__(self, repo, number):
+ self.repo = repo
+ self.number = number
+
+ @property
+ def _pr(self):
+ r = self.repo._session.get('https://api.github.com/repos/{}/pulls/{}'.format(self.repo.name, self.number))
+ assert 200 <= r.status_code < 300, r.json()
+ return r.json()
+
+ @property
+ def head(self):
+ return self._pr['head']['sha']
+
+ @property
+ def comments(self):
+ r = self.repo._session.get('https://api.github.com/repos/{}/issues/{}/comments'.format(self.repo.name, self.number))
+ assert 200 <= r.status_code < 300, r.json()
+ return [
+ (c['user']['login'], c['body'])
+ for c in r.json()
+ ]
+
+ def post_comment(self, body, token=None):
+ assert self.repo.hook
+ headers = {}
+ if token:
+ headers['Authorization'] = 'token %s' % token
+ r = self.repo._session.post(
+ 'https://api.github.com/repos/{}/issues/{}/comments'.format(self.repo.name, self.number),
+ json={'body': body},
+ headers=headers,
+ )
+ assert 200 <= r.status_code < 300, r.json()
+ return r.json()['id']
+
+ def _set_prop(self, prop, value):
+ assert self.repo.hook
+ r = self.repo._session.patch('https://api.github.com/repos/{}/pulls/{}'.format(self.repo.name, self.number), json={
+ prop: value
+ })
+ assert 200 <= r.status_code < 300, r.json()
+
+ def open(self):
+ self._set_prop('state', 'open')
+
+ def close(self):
+ self._set_prop('state', 'closed')
+
+ @property
+ def branch(self):
+ r = self.repo._session.get('https://api.github.com/repos/{}/pulls/{}'.format(
+ self.repo.name,
+ self.number,
+ ))
+ assert 200 <= r.status_code < 300, r.json()
+ info = r.json()
+
+ repo = self.repo
+ reponame = info['head']['repo']['full_name']
+ if reponame != self.repo.name:
+ # not sure deep copying the session object is safe / proper...
+ repo = Repo(copy.deepcopy(self.repo._session), reponame, [])
+
+ return PRBranch(repo, info['head']['ref'])
+PRBranch = collections.namedtuple('PRBranch', 'repo branch')
+
+class Environment:
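+    """ Thin XML-RPC client for the odoo server under test: call it to
+    execute arbitrary model methods, index it to get Model proxies
+    """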
+ def __init__(self, port, db):
+ self._uid = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/common'.format(port)).authenticate(db, 'admin', 'admin', {})
+ self._object = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/object'.format(port))
+ self._db = db
+
+ def __call__(self, model, method, *args, **kwargs):
+ return self._object.execute_kw(
+ self._db, self._uid, 'admin',
+ model, method,
+ args, kwargs
+ )
+
+ def __getitem__(self, name):
+ return Model(self, name)
+
+ def run_crons(self, *xids):
+ crons = xids or DEFAULT_CRONS
+ for xid in crons:
+ _, model, cron_id = self('ir.model.data', 'xmlid_lookup', xid)
+ assert model == 'ir.cron', "Expected {} to be a cron, got {}".format(xid, model)
+ self('ir.cron', 'method_direct_trigger', [cron_id])
+ # sleep for some time as a lot of crap may have happened (?)
+ wait_for_hook()
+
+class Model:
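+    """ Client-side proxy for an Odoo recordset: field accesses and method
+    calls are forwarded over XML-RPC through the Environment
+    """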
+ __slots__ = ['_env', '_model', '_ids', '_fields']
+ def __init__(self, env, model, ids=(), fields=None):
+ object.__setattr__(self, '_env', env)
+ object.__setattr__(self, '_model', model)
+ object.__setattr__(self, '_ids', tuple(ids or ()))
+
+ object.__setattr__(self, '_fields', fields or self._env(self._model, 'fields_get', attributes=['type', 'relation']))
+
+ @property
+ def ids(self):
+ return self._ids
+
+ def __bool__(self):
+ return bool(self._ids)
+
+ def __len__(self):
+ return len(self._ids)
+
+ def __eq__(self, other):
+ if not isinstance(other, Model):
+ return NotImplemented
+ return self._model == other._model and self._ids == other._ids
+
+ def __repr__(self):
+ return "{}({})".format(self._model, ', '.join(str(id) for id in self._ids))
+
+ def exists(self):
+ ids = self._env(self._model, 'exists', self._ids)
+ return Model(self._env, self._model, ids)
+
+ def search(self, *args, **kwargs):
+ ids = self._env(self._model, 'search', *args, **kwargs)
+ return Model(self._env, self._model, ids)
+
+ def create(self, values):
+ return Model(self._env, self._model, [self._env(self._model, 'create', values)])
+
+ def write(self, values):
+ return self._env(self._model, 'write', self._ids, values)
+
+ def read(self, fields):
+ return self._env(self._model, 'read', self._ids, fields)
+
+ def unlink(self):
+ return self._env(self._model, 'unlink', self._ids)
+
+ def __getitem__(self, index):
+ if isinstance(index, str):
+ return getattr(self, index)
+ ids = self._ids[index]
+ if isinstance(ids, int):
+ ids = [ids]
+
+ return Model(self._env, self._model, ids, fields=self._fields)
+
+ def __getattr__(self, fieldname):
+ if not self._ids:
+ return False
+
+ assert len(self._ids) == 1
+ if fieldname == 'id':
+ return self._ids[0]
+
+ val = self.read([fieldname])[0][fieldname]
+ field_description = self._fields[fieldname]
+ if field_description['type'] in ('many2one', 'one2many', 'many2many'):
+ val = val or []
+ if field_description['type'] == 'many2one':
+ val = val[:1] # (id, name) => [id]
+ return Model(self._env, field_description['relation'], val)
+
+ return val
+
+ def __setattr__(self, fieldname, value):
+ assert self._fields[fieldname]['type'] not in ('many2one', 'one2many', 'many2many')
+ self._env(self._model, 'write', self._ids, {fieldname: value})
+
+ def __iter__(self):
+ return (
+ Model(self._env, self._model, [i], fields=self._fields)
+ for i in self._ids
+ )
+
+ def mapped(self, path):
+ field, *rest = path.split('.', 1)
+ descr = self._fields[field]
+ if descr['type'] in ('many2one', 'one2many', 'many2many'):
+ result = Model(self._env, descr['relation'])
+ for record in self:
+ result |= getattr(record, field)
+
+ return result.mapped(rest[0]) if rest else result
+
+ assert not rest
+ return [getattr(r, field) for r in self]
+
+ def filtered(self, fn):
+ result = Model(self._env, self._model, fields=self._fields)
+ for record in self:
+ if fn(record):
+ result |= record
+ return result
+
+ def __sub__(self, other):
+ if not isinstance(other, Model) or self._model != other._model:
+ return NotImplemented
+
+ return Model(self._env, self._model, tuple(id_ for id_ in self._ids if id_ not in other._ids), fields=self._fields)
+
+ def __or__(self, other):
+ if not isinstance(other, Model) or self._model != other._model:
+ return NotImplemented
+
+ return Model(self._env, self._model, {*self._ids, *other._ids}, fields=self._fields)
+ __add__ = __or__
+
+ def __and__(self, other):
+ if not isinstance(other, Model) or self._model != other._model:
+ return NotImplemented
+
+ return Model(self._env, self._model, tuple(id_ for id_ in self._ids if id_ in other._ids), fields=self._fields)
+
+
+ def invalidate_cache(self, fnames=None, ids=None):
+ pass # not a concern when every access is an RPC call
diff --git a/forwardport/tests/test_simple.py b/forwardport/tests/test_simple.py
new file mode 100644
index 00000000..d997c369
--- /dev/null
+++ b/forwardport/tests/test_simple.py
@@ -0,0 +1,1068 @@
+# -*- coding: utf-8 -*-
+import collections
+import re
+import sys
+import time
+from operator import itemgetter
+
+import pytest
+
+from utils import *
+
+# need:
+# * an odoo server
+# - connected to a database
+# - with relevant modules loaded / installed
+# - set up project
+# - add repo, branch(es)
+# - provide visibility to contents so it can be queried & al
+# * a tunnel so the server is visible from the outside (webhooks)
+# * the ability to create repos on github
+# - repo name
+# - a github user to create a repo with
+# - a github owner to create a repo *for*
+# - provide ability to create commits, branches, prs, ...
+def make_basic(env, config, make_repo, *, reponame='proj', project_name='myproject'):
+ """ Creates a basic repo with 3 forking branches
+
+ 0 -- 1 -- 2 -- 3 -- 4 : a
+ |
+ `-- 11 -- 22 : b
+ |
+ `-- 111 : c
+ each branch just adds and modifies a file (resp. f, g and h) through the
+ contents sequence a b c d e
+ """
+ Projects = env['runbot_merge.project']
+ project = Projects.search([('name', '=', project_name)])
+ if not project:
+ project = env['runbot_merge.project'].create({
+ 'name': project_name,
+ 'github_token': config['github']['token'],
+ 'github_prefix': 'hansen',
+ 'fp_github_token': config['github']['token'],
+ 'required_statuses': 'legal/cla,ci/runbot',
+ 'branch_ids': [
+ (0, 0, {'name': 'a', 'fp_sequence': 2, 'fp_target': True}),
+ (0, 0, {'name': 'b', 'fp_sequence': 1, 'fp_target': True}),
+ (0, 0, {'name': 'c', 'fp_sequence': 0, 'fp_target': True}),
+ ],
+ })
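+        # NOTE: fp_sequence decreases from a to c as _forward_port_ordering
+        # sorts descending, so the port order runs a -> b -> c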
+
+ prod = make_repo(reponame)
+ with prod:
+ a_0, a_1, a_2, a_3, a_4, = prod.make_commits(
+ None,
+ Commit("0", tree={'f': 'a'}),
+ Commit("1", tree={'f': 'b'}),
+ Commit("2", tree={'f': 'c'}),
+ Commit("3", tree={'f': 'd'}),
+ Commit("4", tree={'f': 'e'}),
+ ref='heads/a',
+ )
+ b_1, b_2 = prod.make_commits(
+ a_2,
+ Commit('11', tree={'g': 'a'}),
+ Commit('22', tree={'g': 'b'}),
+ ref='heads/b',
+ )
+ prod.make_commits(
+ b_1,
+ Commit('111', tree={'h': 'a'}),
+ ref='heads/c',
+ )
+ other = prod.fork()
+ project.write({
+ 'repo_ids': [(0, 0, {
+ 'name': prod.name,
+ 'fp_remote_target': other.name,
+ })],
+ })
+
+ return prod, other
+def test_straightforward_flow(env, config, make_repo, users):
+ # TODO: ~all relevant data in users when creating partners
+ # get reviewer's name
+ reviewer_name = env['res.partner'].search([
+ ('github_login', '=', users['reviewer'])
+ ]).name
+
+ prod, other = make_basic(env, config, make_repo)
+ other_user = config['role_other']
+ other_user_repo = prod.fork(token=other_user['token'])
+
+ project = env['runbot_merge.project'].search([])
+ b_head = prod.commit('b')
+ c_head = prod.commit('c')
+ with prod, other_user_repo:
+ # create PR as a user with no access to prod (or other)
+ [_, p_1] = other_user_repo.make_commits(
+ 'a',
+ Commit('p_0', tree={'x': '0'}),
+ Commit('p_1', tree={'x': '1'}),
+ ref='heads/hugechange'
+ )
+ pr = prod.make_pr(
+ target='a', title="super important change",
+ head=other_user['user'] + ':hugechange',
+ token=other_user['token']
+ )
+ prod.post_status(p_1, 'success', 'legal/cla')
+ prod.post_status(p_1, 'success', 'ci/runbot')
+ # use rebase-ff (instead of rebase-merge) so we don't have to dig in
+ # parents of the merge commit to find the cherrypicks
+ pr.post_comment('hansen r+ rebase-ff', config['role_reviewer']['token'])
+
+ env.run_crons()
+ with prod:
+ prod.post_status('staging.a', 'success', 'legal/cla')
+ prod.post_status('staging.a', 'success', 'ci/runbot')
+
+ # should merge the staging then create the FP PR
+ env.run_crons()
+
+ p_1_merged = prod.commit('a')
+
+ assert p_1_merged.id != p_1
+ assert p_1_merged.message == MESSAGE_TEMPLATE.format(
+ message='p_1',
+ repo=prod.name,
+ number=pr.number,
+ headers='',
+ name=reviewer_name,
+ login=users['reviewer'],
+ )
+ assert prod.read_tree(p_1_merged) == {
+ 'f': 'e',
+ 'x': '1',
+ }, "ensure p_1_merged has ~ the same contents as p_1 but is a different commit"
+ [p_0_merged] = p_1_merged.parents
+
+ # wait a bit for PR webhook... ?
+ time.sleep(5)
+ env.run_crons()
+
+ pr0, pr1 = env['runbot_merge.pull_requests'].search([], order='number')
+ assert pr0.number == pr.number
+ # 50 lines in, we can start checking the forward port...
+ assert pr1.parent_id == pr0
+ assert pr1.source_id == pr0
+ other_owner = other.name.split('/')[0]
+ assert re.match(other_owner + ':' + REF_PATTERN.format(target='b', source='hugechange'), pr1.label), \
+ "check that FP PR was created in FP target repo"
+ c = prod.commit(pr1.head)
+ # TODO: add original committer (if !author) as co-author in commit message?
+ assert c.author['name'] == other_user['user'], "author should still be original's probably"
+ assert itemgetter('name', 'email')(c.committer) == (project.fp_github_name, project.fp_github_email)
+ assert prod.read_tree(c) == {
+ 'f': 'c',
+ 'g': 'b',
+ 'x': '1'
+ }
+ with prod:
+ prod.post_status(pr1.head, 'success', 'ci/runbot')
+ prod.post_status(pr1.head, 'success', 'legal/cla')
+
+ env.run_crons()
+ env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron')
+
+ pr0_, pr1_, pr2 = env['runbot_merge.pull_requests'].search([], order='number')
+ assert pr0_ == pr0
+ assert pr1_ == pr1
+ assert pr2.parent_id == pr1
+ assert pr2.source_id == pr0
+ assert not pr0.squash, "original PR has >1 commit"
+ assert not (pr1.squash or pr2.squash), "forward ports should also have >1 commit"
+ assert re.match(REF_PATTERN.format(target='c', source='hugechange'), pr2.refname), \
+ "check that FP PR was created in FP target repo"
+ assert prod.read_tree(prod.commit(pr2.head)) == {
+ 'f': 'c',
+ 'g': 'a',
+ 'h': 'a',
+ 'x': '1'
+ }
+ assert prod.get_pr(pr2.number).comments == [
+ (users['user'], "Ping @%s, @%s" % (users['other'], users['reviewer'])),
+ (users['user'], "This forward port PR is awaiting action"),
+ ]
+ with prod:
+ prod.post_status(pr2.head, 'success', 'ci/runbot')
+ prod.post_status(pr2.head, 'success', 'legal/cla')
+
+ prod.get_pr(pr2.number).post_comment('%s r+' % project.fp_github_name, config['role_reviewer']['token'])
+
+ env.run_crons()
+
+ assert pr1.staging_id
+ assert pr2.staging_id
+ # two branches so should have two stagings
+ assert pr1.staging_id != pr2.staging_id
+ # validate
+ with prod:
+ prod.post_status('staging.b', 'success', 'ci/runbot')
+ prod.post_status('staging.b', 'success', 'legal/cla')
+ prod.post_status('staging.c', 'success', 'ci/runbot')
+ prod.post_status('staging.c', 'success', 'legal/cla')
+
+ # and trigger merge
+ env.run_crons()
+
+ # apparently github strips out trailing newlines when fetching through the
+ # API...
+ message_template = MESSAGE_TEMPLATE.format(
+ message='p_1',
+ repo=prod.name,
+ number='%s',
+ headers='X-original-commit: {}\n'.format(p_1_merged.id),
+ name=reviewer_name,
+ login=users['reviewer'],
+ )
+
+ old_b = prod.read_tree(b_head)
+ head_b = prod.commit('b')
+ assert head_b.message == message_template % pr1.number
+ assert prod.commit(head_b.parents[0]).message == 'p_0\n\nX-original-commit: %s' % p_0_merged
+ b_tree = prod.read_tree(head_b)
+ assert b_tree == {
+ **old_b,
+ 'x': '1',
+ }
+ old_c = prod.read_tree(c_head)
+ head_c = prod.commit('c')
+ assert head_c.message == message_template % pr2.number
+ assert prod.commit(head_c.parents[0]).message == 'p_0\n\nX-original-commit: %s' % p_0_merged
+ c_tree = prod.read_tree(head_c)
+ assert c_tree == {
+ **old_c,
+ 'x': '1',
+ }
+ # check that we didn't just smash the original trees
+ assert prod.read_tree(prod.commit('a')) != b_tree != c_tree
+
+def test_update_pr(env, config, make_repo):
+ """ Even for successful cherrypicks, it's possible that e.g. CI doesn't
+ pass or the reviewer finds out they need to update the code.
+
+ In this case, all following forward ports should... be detached? Or maybe
+ only this one and its dependent should be updated?
+ """
+ prod, other = make_basic(env, config, make_repo)
+ with prod:
+ [p_1] = prod.make_commits(
+ 'a',
+ Commit('p_0', tree={'x': '0'}),
+ ref='heads/hugechange'
+ )
+ pr = prod.make_pr(target='a', head='hugechange')
+ prod.post_status(p_1, 'success', 'legal/cla')
+ prod.post_status(p_1, 'success', 'ci/runbot')
+ pr.post_comment('hansen r+', config['role_reviewer']['token'])
+
+ env.run_crons()
+ with prod:
+ prod.post_status('staging.a', 'success', 'legal/cla')
+ prod.post_status('staging.a', 'success', 'ci/runbot')
+
+ # should merge the staging then create the FP PR
+ env.run_crons()
+
+ pr0, pr1 = env['runbot_merge.pull_requests'].search([], order='number')
+ with prod: # FP to c as well
+ prod.post_status(pr1.head, 'success', 'ci/runbot')
+ prod.post_status(pr1.head, 'success', 'legal/cla')
+ env.run_crons()
+
+ pr0, pr1, pr2 = env['runbot_merge.pull_requests'].search([], order='number')
+ assert pr1.parent_id == pr0
+ assert pr2.parent_id == pr1
+ pr1_head = pr1.head
+ pr2_head = pr2.head
+
+ # turns out branch b is syntactically but not semantically compatible! It
+ # needs x to be 5!
+ pr_repo, pr_ref = prod.get_pr(pr1.number).branch
+ with pr_repo:
+ # force-push correct commit to PR's branch
+ [new_c] = pr_repo.make_commits(
+ pr1.target.name,
+ Commit('whop whop', tree={'x': '5'}),
+ ref='heads/%s' % pr_ref
+ )
+ env.run_crons()
+
+ assert pr1.head == new_c != pr1_head, "the FP PR should be updated"
+ assert not pr1.parent_id, "the FP PR should be detached from the original"
+ # NOTE: should the followup PR wait for pr1 CI or not?
+ assert pr2.head != pr2_head
+ assert pr2.parent_id == pr1, "the followup PR should still be linked"
+
+ assert prod.read_tree(prod.commit(pr1.head)) == {
+ 'f': 'c',
+ 'g': 'b',
+ 'x': '5'
+ }, "the FP PR should have the new code"
+ assert prod.read_tree(prod.commit(pr2.head)) == {
+ 'f': 'c',
+ 'g': 'a',
+ 'h': 'a',
+ 'x': '5'
+ }, "the followup FP should also have the update"
+
+ with pr_repo:
+ pr_repo.make_commits(
+ pr1.target.name,
+ Commit('fire!', tree={'h': '0'}),
+ ref='heads/%s' % pr_ref,
+ )
+ env.run_crons()
+ # since pr2 is a dependent of pr1, the update is propagated to it as
+ # well, where the new `h` file conflicts with the `h` branch c already
+ # has, leaving pr2 broken
+ assert prod.read_tree(prod.commit(pr1.head)) == {
+ 'f': 'c',
+ 'g': 'b',
+ 'h': '0'
+ }
+ assert prod.read_tree(prod.commit(pr2.head)) == {
+ 'f': 'c',
+ 'g': 'a',
+ 'h': '''<<<<<<< ours
+a
+=======
+0
+>>>>>>> theirs
+''',
+ }
+
+def test_conflict(env, config, make_repo):
+ """ If there's a conflict when forward-porting the commit, commit the
+ conflict and create a draft PR.
+ """
+ prod, other = make_basic(env, config, make_repo)
+ # reset b to b~1 (g=a) parent so there's no b -> c conflict
+ with prod:
+ prod.update_ref('heads/b', prod.commit('b').parents[0], force=True)
+
+ # generate a conflict: create a g file in a PR to a
+ with prod:
+ [p_0] = prod.make_commits(
+ 'a', Commit('p_0', tree={'g': 'xxx'}),
+ ref='heads/conflicting'
+ )
+ pr = prod.make_pr(target='a', head='conflicting')
+ prod.post_status(p_0, 'success', 'legal/cla')
+ prod.post_status(p_0, 'success', 'ci/runbot')
+ pr.post_comment('hansen r+', config['role_reviewer']['token'])
+
+ env.run_crons()
+ with prod:
+ prod.post_status('staging.a', 'success', 'legal/cla')
+ prod.post_status('staging.a', 'success', 'ci/runbot')
+
+ env.run_crons()
+ # wait a bit for PR webhook... ?
+ time.sleep(5)
+ env.run_crons()
+
+ # should have created a new PR
+ pr0, pr1 = env['runbot_merge.pull_requests'].search([], order='number')
+ # but it should not have a parent, and there should be conflict markers
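+ # ('ours' is the forward-port target's current content, 'theirs' the
+ # cherry-picked change; presumably the labels the fp cherry-pick passes
+ # to git, per the tree assertion below)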
+ assert not pr1.parent_id
+ assert pr1.source_id == pr0
+ assert prod.read_tree(prod.commit('b')) == {
+ 'f': 'c',
+ 'g': 'a',
+ }
+ assert pr1.state == 'opened'
+ assert prod.read_tree(prod.commit(pr1.head)) == {
+ 'f': 'c',
+ 'g': '''<<<<<<< ours
+a
+=======
+xxx
+>>>>>>> theirs
+''',
+ }
+
+ # check that CI passing does not create more PRs
+ with prod:
+ validate_all([prod], [pr1.head])
+ env.run_crons()
+ time.sleep(5)
+ env.run_crons()
+ assert pr0 | pr1 == env['runbot_merge.pull_requests'].search([], order='number'),\
+ "CI passing should not have resumed the FP process on a conflicting / draft PR"
+
+ # fix the PR, should behave as if this were a normal PR
+ get_pr = prod.get_pr(pr1.number)
+ pr_repo, pr_ref = get_pr.branch
+ with pr_repo:
+ pr_repo.make_commits(
+ # if just given a branch name, make_commits would resolve it in
+ # pr_repo, whose "b" was cloned before that branch got rolled back
+ prod.commit('b').id,
+ Commit('g should indeed b xxx', tree={'g': 'xxx'}),
+ ref='heads/%s' % pr_ref
+ )
+ env.run_crons()
+ assert prod.read_tree(prod.commit(pr1.head)) == {
+ 'f': 'c',
+ 'g': 'xxx',
+ }
+ assert pr1.state == 'opened', "state should be open still"
+
+ # check that merging the fixed PR fixes the flow and restarts a forward
+ # port process
+ with prod:
+ prod.post_status(pr1.head, 'success', 'legal/cla')
+ prod.post_status(pr1.head, 'success', 'ci/runbot')
+ get_pr.post_comment('hansen r+', config['role_reviewer']['token'])
+ env.run_crons()
+
+ print(f"check staging of {pr1} ({pr1.staging_id})", file=sys.stderr)
+ print(f'\t{pr1.batch_ids.read(["id", "active", "staging_id"])}', file=sys.stderr)
+ assert pr1.staging_id
+ with prod:
+ prod.post_status('staging.b', 'success', 'legal/cla')
+ prod.post_status('staging.b', 'success', 'ci/runbot')
+ env.run_crons()
+
+ *_, pr2 = env['runbot_merge.pull_requests'].search([], order='number')
+ assert pr2.parent_id == pr1
+ assert pr2.source_id == pr0
+ assert re.match(
+ REF_PATTERN.format(target='c', source='conflicting'),
+ pr2.refname
+ )
+ assert prod.read_tree(prod.commit(pr2.head)) == {
+ 'f': 'c',
+ 'g': 'xxx',
+ 'h': 'a',
+ }
+
+def test_empty(env, config, make_repo, users):
+ """ Cherrypick of an already cherrypicked (or separately implemented)
+ commit -> create draft PR.
+ """
+ prod, other = make_basic(env, config, make_repo)
+ # merge change to b
+ with prod:
+ [p_0] = prod.make_commits(
+ 'b', Commit('p', tree={'x': '0'}),
+ ref='heads/early'
+ )
+ pr0 = prod.make_pr(target='b', head='early')
+ prod.post_status(p_0, 'success', 'legal/cla')
+ prod.post_status(p_0, 'success', 'ci/runbot')
+ pr0.post_comment('hansen r+', config['role_reviewer']['token'])
+ env.run_crons()
+ with prod:
+ prod.post_status('staging.b', 'success', 'legal/cla')
+ prod.post_status('staging.b', 'success', 'ci/runbot')
+
+ # merge same change to a afterwards
+ with prod:
+ [p_1] = prod.make_commits(
+ 'a', Commit('p_0', tree={'x': '0'}),
+ ref='heads/late'
+ )
+ pr1 = prod.make_pr(target='a', head='late')
+ prod.post_status(p_1, 'success', 'legal/cla')
+ prod.post_status(p_1, 'success', 'ci/runbot')
+ pr1.post_comment('hansen r+', config['role_reviewer']['token'])
+ env.run_crons()
+ with prod:
+ prod.post_status('staging.a', 'success', 'legal/cla')
+ prod.post_status('staging.a', 'success', 'ci/runbot')
+
+ env.run_crons()
+ assert prod.read_tree(prod.commit('a')) == {
+ 'f': 'e',
+ 'x': '0',
+ }
+ assert prod.read_tree(prod.commit('b')) == {
+ 'f': 'c',
+ 'g': 'b',
+ 'x': '0',
+ }
+ # should have 4 PRs:
+ # PR 0
+ # FP of PR 0 to C
+ # PR 1
+ # failed FP of PR1 to B
+ prs = env['runbot_merge.pull_requests'].search([], order='number')
+ assert len(prs) == 4
+ pr0_id = prs.filtered(lambda p: p.number == pr0.number)
+ pr1_id = prs.filtered(lambda p: p.number == pr1.number)
+ fp_id = prs.filtered(lambda p: p.parent_id == pr0_id)
+ fail_id = prs - (pr0_id | pr1_id | fp_id)
+ assert fp_id
+ assert fail_id
+ # unlinked from parent since cherrypick failed
+ assert not fail_id.parent_id
+ # the tree should be clean...
+ assert prod.read_tree(prod.commit(fail_id.head)) == {
+ 'f': 'c',
+ 'g': 'b',
+ 'x': '0',
+ }
+
+ with prod:
+ validate_all([prod], [fp_id.head, fail_id.head])
+ env.run_crons()
+
+ # should not have created any new PR
+ assert env['runbot_merge.pull_requests'].search([], order='number') == prs
+
+ # check reminder
+ env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron')
+ env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron')
+ failed_pr = prod.get_pr(fail_id.number)
+ assert failed_pr.comments == [
+ (users['user'], 'Ping @{}, @{}'.format(
+ users['user'], users['reviewer'],
+ )),
+ (users['user'], "This forward port PR is awaiting action"),
+ (users['user'], "This forward port PR is awaiting action"),
+ ]
+ # check that this stops if we close the PR
+ with prod:
+ failed_pr.close()
+ env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron')
+ assert failed_pr.comments == [
+ (users['user'], 'Ping @{}, @{}'.format(
+ users['user'], users['reviewer'],
+ )),
+ (users['user'], "This forward port PR is awaiting action"),
+ (users['user'], "This forward port PR is awaiting action"),
+ ], "should not have a third note"
+
+def test_partially_empty(env, config, make_repo):
+ """ Check what happens when only some commits of the PR are now empty
+ """
+ prod, other = make_basic(env, config, make_repo)
+ # merge change to b
+ with prod:
+ [p_0] = prod.make_commits(
+ 'b', Commit('p', tree={'x': '0'}),
+ ref='heads/early'
+ )
+ pr0 = prod.make_pr(target='b', head='early')
+ prod.post_status(p_0, 'success', 'legal/cla')
+ prod.post_status(p_0, 'success', 'ci/runbot')
+ pr0.post_comment('hansen r+', config['role_reviewer']['token'])
+ env.run_crons()
+ with prod:
+ prod.post_status('staging.b', 'success', 'legal/cla')
+ prod.post_status('staging.b', 'success', 'ci/runbot')
+
+ # merge same change to a afterwards
+ with prod:
+ [*_, p_1] = prod.make_commits(
+ 'a',
+ Commit('p_0', tree={'w': '0'}),
+ Commit('p_1', tree={'x': '0'}),
+ Commit('p_2', tree={'y': '0'}),
+ ref='heads/late'
+ )
+ pr1 = prod.make_pr(target='a', head='late')
+ prod.post_status(p_1, 'success', 'legal/cla')
+ prod.post_status(p_1, 'success', 'ci/runbot')
+ pr1.post_comment('hansen r+ rebase-merge', config['role_reviewer']['token'])
+ env.run_crons()
+ with prod:
+ prod.post_status('staging.a', 'success', 'legal/cla')
+ prod.post_status('staging.a', 'success', 'ci/runbot')
+
+ env.run_crons()
+ assert prod.read_tree(prod.commit('a')) == {
+ 'f': 'e',
+ 'w': '0',
+ 'x': '0',
+ 'y': '0',
+ }
+ assert prod.read_tree(prod.commit('b')) == {
+ 'f': 'c',
+ 'g': 'b',
+ 'x': '0',
+ }
+
+ fail_id = env['runbot_merge.pull_requests'].search([
+ ('number', 'not in', [pr0.number, pr1.number]),
+ ('parent_id', '=', False),
+ ], order='number')
+ assert fail_id
+ # unlinked from parent since cherrypick failed
+ assert not fail_id.parent_id
+ # the tree should be clean...
+ assert prod.read_tree(prod.commit(fail_id.head)) == {
+ 'f': 'c',
+ 'g': 'b',
+ 'w': '0',
+ 'x': '0',
+ 'y': '0',
+ }
+
+Description = collections.namedtuple('Description', 'source limit')
+def test_limit_configure(env, config, make_repo):
+ """ Checks that configuring an FP limit on a PR is respected
+
+ * limit to a branch which is not the latest
+ * limit to the current target (= no FP)
+ * limit to an earlier branch (???)
+ """
+ prod, other = make_basic(env, config, make_repo)
+ bot_name = env['runbot_merge.project'].search([]).fp_github_name
+ descriptions = [
+ Description(source='a', limit='b'),
+ Description(source='b', limit='b'),
+ Description(source='b', limit='a'),
+ ]
+ originals = []
+ with prod:
+ for i, descr in enumerate(descriptions):
+ [c] = prod.make_commits(
+ descr.source, Commit('c %d' % i, tree={str(i): str(i)}),
+ ref='heads/branch%d' % i,
+ )
+ pr = prod.make_pr(target=descr.source, head='branch%d'%i)
+ prod.post_status(c, 'success', 'legal/cla')
+ prod.post_status(c, 'success', 'ci/runbot')
+ pr.post_comment('hansen r+\n%s up to %s' % (bot_name, descr.limit), config['role_reviewer']['token'])
+ originals.append(pr.number)
+ env.run_crons()
+ with prod:
+ prod.post_status('staging.a', 'success', 'legal/cla')
+ prod.post_status('staging.a', 'success', 'ci/runbot')
+ prod.post_status('staging.b', 'success', 'legal/cla')
+ prod.post_status('staging.b', 'success', 'ci/runbot')
+ env.run_crons()
+
+ # should have created a single FP PR for 0, none for 1 and none for 2
+ prs = env['runbot_merge.pull_requests'].search([], order='number')
+ assert len(prs) == 4
+ assert prs[-1].parent_id == prs[0]
+ assert prs[0].number == originals[0]
+ assert prs[1].number == originals[1]
+ assert prs[2].number == originals[2]
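+
+# a note on the `up to` command exercised above: it is parsed from PR
+# comments of the form "<fp_github_name> up to <branch>" (see the
+# post_comment in the loop); a limit at or before the PR's own target,
+# as in the second and third cases, means no forward port is created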
+
+@pytest.mark.parametrize('enabled', ['active', 'fp_target'])
+def test_limit_disable(env, config, make_repo, users, enabled):
+ """ Checks behaviour if the limit target is disabled:
+
+ * disable target while FP is ongoing -> skip over (and stop there so no FP)
+ * forward-port over a disabled branch
+ * request a disabled target as limit
+
+ Disabling (with respect to forward ports) can be performed by marking the
+ branch as !active (which also affects mergebot operations), or as
+ !fp_target (won't be forward-ported to).
+ """
+ prod, other = make_basic(env, config, make_repo)
+ bot_name = env['runbot_merge.project'].search([]).fp_github_name
+ with prod:
+ [c] = prod.make_commits('a', Commit('c 0', tree={'0': '0'}), ref='heads/branch0')
+ pr = prod.make_pr(target='a', head='branch0')
+ prod.post_status(c, 'success', 'legal/cla')
+ prod.post_status(c, 'success', 'ci/runbot')
+ pr.post_comment('hansen r+\n%s up to b' % bot_name, config['role_reviewer']['token'])
+
+ [c] = prod.make_commits('a', Commit('c 1', tree={'1': '1'}), ref='heads/branch1')
+ pr = prod.make_pr(target='a', head='branch1')
+ prod.post_status(c, 'success', 'legal/cla')
+ prod.post_status(c, 'success', 'ci/runbot')
+ pr.post_comment('hansen r+', config['role_reviewer']['token'])
+ env.run_crons()
+
+ with prod:
+ prod.post_status('staging.a', 'success', 'legal/cla')
+ prod.post_status('staging.a', 'success', 'ci/runbot')
+ # disable branch b
+ env['runbot_merge.branch'].search([('name', '=', 'b')]).write({enabled: False})
+ env.run_crons()
+
+ # should have created a single PR (to branch c, for pr 1)
+ _0, _1, p = env['runbot_merge.pull_requests'].search([], order='number')
+ assert p.parent_id == _1
+ assert p.target.name == 'c'
+
+ with prod:
+ [c] = prod.make_commits('a', Commit('c 2', tree={'2': '2'}), ref='heads/branch2')
+ pr = prod.make_pr(target='a', head='branch2')
+ prod.post_status(c, 'success', 'legal/cla')
+ prod.post_status(c, 'success', 'ci/runbot')
+ pr.post_comment('hansen r+\n%s up to' % bot_name, config['role_reviewer']['token'])
+ pr.post_comment('%s up to b' % bot_name, config['role_reviewer']['token'])
+ pr.post_comment('%s up to foo' % bot_name, config['role_reviewer']['token'])
+ pr.post_comment('%s up to c' % bot_name, config['role_reviewer']['token'])
+ env.run_crons()
+
+ # use a set because github webhook delays might lead to mis-ordered
+ # responses, and we don't care that much about ordering
+ assert set(pr.comments) == {
+ (users['reviewer'], "hansen r+\n%s up to" % bot_name),
+ (users['reviewer'], "%s up to b" % bot_name),
+ (users['reviewer'], "%s up to foo" % bot_name),
+ (users['reviewer'], "%s up to c" % bot_name),
+ (users['user'], "Please provide a branch to forward-port to."),
+ (users['user'], "Branch 'b' is disabled, it can't be used as a forward port target."),
+ (users['user'], "There is no branch 'foo', it can't be used as a forward port target."),
+ (users['user'], "Forward-porting to 'c'."),
+ }
+
+# `reviewer` is the reviewer of the FP sequence; the original PR is always
+# reviewed by `user`, who is set up as a reviewer
+Case = collections.namedtuple('Case', 'author reviewer delegate success')
+ACL = [
+ Case('reviewer', 'reviewer', None, False),
+ Case('reviewer', 'self_reviewer', None, False),
+ Case('reviewer', 'other', None, False),
+ Case('reviewer', 'other', 'other', True),
+
+ Case('self_reviewer', 'reviewer', None, True),
+ Case('self_reviewer', 'self_reviewer', None, True),
+ Case('self_reviewer', 'other', None, False),
+ Case('self_reviewer', 'other', 'other', True),
+
+ Case('other', 'reviewer', None, True),
+ Case('other', 'self_reviewer', None, False),
+ Case('other', 'other', None, False),
+ Case('other', 'other', 'other', True),
+]
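+# reading one row: Case('reviewer', 'other', 'other', True) is a PR
+# authored by the 'reviewer' role, whose forward ports are approved by
+# 'other' after 'other' was delegated on the original PR, and that
+# approval should be accepted (the FP PRs get staged)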
+@pytest.mark.parametrize(Case._fields, ACL)
+def test_access_rights(env, config, make_repo, users, author, reviewer, delegate, success):
+ prod, other = make_basic(env, config, make_repo)
+ project = env['runbot_merge.project'].search([])
+
+ # create a partner for `user`
+ env['res.partner'].create({
+ 'name': users['user'],
+ 'github_login': users['user'],
+ 'reviewer': True,
+ })
+
+ author_token = config['role_' + author]['token']
+ fork = prod.fork(token=author_token)
+ with prod, fork:
+ [c] = fork.make_commits('a', Commit('c_0', tree={'y': '0'}), ref='heads/accessrights')
+ pr = prod.make_pr(
+ target='a', title='my change',
+ head=users[author] + ':accessrights',
+ token=author_token,
+ )
+ prod.post_status(c, 'success', 'legal/cla')
+ prod.post_status(c, 'success', 'ci/runbot')
+ pr.post_comment('hansen r+', token=config['github']['token'])
+ if delegate:
+ pr.post_comment('hansen delegate=%s' % users[delegate], token=config['github']['token'])
+ env.run_crons()
+
+ with prod:
+ prod.post_status('staging.a', 'success', 'legal/cla')
+ prod.post_status('staging.a', 'success', 'ci/runbot')
+ env.run_crons()
+
+ pr0, pr1 = env['runbot_merge.pull_requests'].search([], order='number')
+ assert pr0.state == 'merged'
+ with prod:
+ prod.post_status(pr1.head, 'success', 'ci/runbot')
+ prod.post_status(pr1.head, 'success', 'legal/cla')
+ env.run_crons()
+
+ _, _, pr2 = env['runbot_merge.pull_requests'].search([], order='number')
+ with prod:
+ prod.post_status(pr2.head, 'success', 'ci/runbot')
+ prod.post_status(pr2.head, 'success', 'legal/cla')
+ prod.get_pr(pr2.number).post_comment(
+ '%s r+' % project.fp_github_name,
+ token=config['role_' + reviewer]['token']
+ )
+ env.run_crons()
+ print(f"check staging of {pr1} ({pr1.staging_id}), {pr2} ({pr2.staging_id})", file=sys.stderr)
+ if success:
+ assert pr1.staging_id and pr2.staging_id,\
+ "%s should have approved FP of PRs by %s" % (reviewer, author)
+ else:
+ assert not (pr1.staging_id or pr2.staging_id),\
+ "%s should *not* have approved FP of PRs by %s" % (reviewer, author)
+
+def test_batched(env, config, make_repo, users):
+ """ Tests for projects with multiple repos & sync'd branches. Batches
+ should be FP'd to batches
+ """
+ main1, _ = make_basic(env, config, make_repo, reponame='main1')
+ main2, _ = make_basic(env, config, make_repo, reponame='main2')
+ main1.unsubscribe(config['role_reviewer']['token'])
+ main2.unsubscribe(config['role_reviewer']['token'])
+
+ friendo = config['role_other']
+ other1 = main1.fork(token=friendo['token'])
+ other2 = main2.fork(token=friendo['token'])
+
+ with main1, other1:
+ [c1] = other1.make_commits(
+ 'a', Commit('commit repo 1', tree={'1': 'a'}),
+ ref='heads/contribution'
+ )
+ pr1 = main1.make_pr(
+ target='a', title="My contribution",
+ head=friendo['user'] + ':contribution',
+ token=friendo['token']
+ )
+ # we can ack it directly as it should not be taken into account until
+ # we run the crons
+ validate_all([main1], [c1])
+ pr1.post_comment('hansen r+', config['role_reviewer']['token'])
+ with main2, other2:
+ [c2] = other2.make_commits(
+ 'a', Commit('commit repo 2', tree={'2': 'a'}),
+ ref='heads/contribution' # use same ref / label as pr1
+ )
+ pr2 = main2.make_pr(
+ target='a', title="Main2 part of my contribution",
+ head=friendo['user'] + ':contribution',
+ token=friendo['token']
+ )
+ validate_all([main2], [c2])
+ pr2.post_comment('hansen r+', config['role_reviewer']['token'])
+
+ env.run_crons()
+
+ # sanity check: this should have created a staging with 1 batch with pr1 and pr2
+ stagings = env['runbot_merge.stagings'].search([])
+ assert len(stagings) == 1
+ assert stagings.target.name == 'a'
+ assert len(stagings.batch_ids) == 1
+ assert stagings.mapped('batch_ids.prs.number') == [pr1.number, pr2.number]
+
+ with main1, main2:
+ validate_all([main1, main2], ['staging.a'])
+ env.run_crons()
+
+ PullRequests = env['runbot_merge.pull_requests']
+ # created the first forward port, need to validate it so the second one is
+ # triggered (FP only goes forward on CI+) (?)
+ pr1b = PullRequests.search([
+ ('source_id', '!=', False),
+ ('repository.name', '=', main1.name),
+ ])
+ pr2b = PullRequests.search([
+ ('source_id', '!=', False),
+ ('repository.name', '=', main2.name),
+ ])
+ # check that relevant users were pinged
+ ping = [
+ (users['user'], "Ping @{}, @{}".format(friendo['user'], users['reviewer'])),
+ ]
+ pr_remote_1b = main1.get_pr(pr1b.number)
+ pr_remote_2b = main2.get_pr(pr2b.number)
+ assert pr_remote_1b.comments == ping
+ assert pr_remote_2b.comments == ping
+
+ with main1, main2:
+ validate_all([main1], [pr1b.head])
+ validate_all([main2], [pr2b.head])
+ env.run_crons() # process updated statuses -> generate followup FP
+
+ # should have created two PRs whose source is p1 and two whose source is p2
+ pr1a, pr1b, pr1c = PullRequests.search([
+ ('repository.name', '=', main1.name),
+ ], order='number')
+ pr2a, pr2b, pr2c = PullRequests.search([
+ ('repository.name', '=', main2.name),
+ ], order='number')
+
+ assert pr1a.number == pr1.number
+ assert pr2a.number == pr2.number
+ assert pr1a.state == pr2a.state == 'merged'
+
+ print('PRA label', (pr1a | pr2a).mapped('label'))
+ print('PRB label', (pr1b | pr2b).mapped('label'))
+ print('PRC label', (pr1c | pr2c).mapped('label'))
+
+ assert pr1b.label == pr2b.label, "batched source should yield batched FP"
+ assert pr1c.label == pr2c.label, "batched source should yield batched FP"
+ assert pr1b.label != pr1c.label
+
+ project = env['runbot_merge.project'].search([])
+ # ok main1 PRs
+ with main1:
+ validate_all([main1], [pr1c.head])
+ main1.get_pr(pr1c.number).post_comment('%s r+' % project.fp_github_name, config['role_reviewer']['token'])
+ env.run_crons()
+
+ # check that the main1 PRs are ready but blocked on the main2 PRs
+ assert pr1b.state == 'ready'
+ assert pr1c.state == 'ready'
+ assert pr1b.blocked
+ assert pr1c.blocked
+
+ # ok main2 PRs
+ with main2:
+ validate_all([main2], [pr2c.head])
+ main2.get_pr(pr2c.number).post_comment('%s r+' % project.fp_github_name, config['role_reviewer']['token'])
+ env.run_crons()
+
+ stb, stc = env['runbot_merge.stagings'].search([], order='target')
+ assert stb.target.name == 'b'
+ assert stc.target.name == 'c'
+
+ with main1, main2:
+ validate_all([main1, main2], ['staging.b', 'staging.c'])
+
+class TestClosing:
+ def test_closing_before_fp(self, env, config, make_repo, users):
+ """ Closing a PR should preclude its forward port
+ """
+ prod, other = make_basic(env, config, make_repo)
+ with prod:
+ [p_1] = prod.make_commits(
+ 'a',
+ Commit('p_0', tree={'x': '0'}),
+ ref='heads/hugechange'
+ )
+ pr = prod.make_pr(target='a', head='hugechange')
+ prod.post_status(p_1, 'success', 'legal/cla')
+ prod.post_status(p_1, 'success', 'ci/runbot')
+ pr.post_comment('hansen r+', config['role_reviewer']['token'])
+
+ env.run_crons()
+ with prod:
+ prod.post_status('staging.a', 'success', 'legal/cla')
+ prod.post_status('staging.a', 'success', 'ci/runbot')
+ # should merge the staging then create the FP PR
+ env.run_crons()
+
+ pr0, pr1 = env['runbot_merge.pull_requests'].search([], order='number')
+ # close the FP PR then have CI validate it
+ with prod:
+ prod.get_pr(pr1.number).close()
+ assert pr1.state == 'closed'
+ assert not pr1.parent_id, "closed PR should should be detached from its parent"
+ with prod:
+ prod.post_status(pr1.head, 'success', 'legal/cla')
+ prod.post_status(pr1.head, 'success', 'ci/runbot')
+ env.run_crons()
+ env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron')
+
+ assert env['runbot_merge.pull_requests'].search([], order='number') == pr0 | pr1,\
+ "closing the PR should suppress the FP sequence"
+ assert prod.get_pr(pr1.number).comments == [
+ (users['user'], 'Ping @{}, @{}'.format(
+ users['user'], users['reviewer'],
+ ))
+ ]
+
+ def test_closing_after_fp(self, env, config, make_repo):
+ """ Closing a PR which has been forward-ported should not touch the
+ followups
+ """
+ prod, other = make_basic(env, config, make_repo)
+ with prod:
+ [p_1] = prod.make_commits(
+ 'a',
+ Commit('p_0', tree={'x': '0'}),
+ ref='heads/hugechange'
+ )
+ pr = prod.make_pr(target='a', head='hugechange')
+ prod.post_status(p_1, 'success', 'legal/cla')
+ prod.post_status(p_1, 'success', 'ci/runbot')
+ pr.post_comment('hansen r+', config['role_reviewer']['token'])
+
+ env.run_crons()
+ with prod:
+ prod.post_status('staging.a', 'success', 'legal/cla')
+ prod.post_status('staging.a', 'success', 'ci/runbot')
+
+ # should merge the staging then create the FP PR
+ env.run_crons()
+
+ pr0, pr1 = env['runbot_merge.pull_requests'].search([], order='number')
+ with prod:
+ prod.post_status(pr1.head, 'success', 'legal/cla')
+ prod.post_status(pr1.head, 'success', 'ci/runbot')
+ # should create the second staging
+ env.run_crons()
+
+ pr0_1, pr1_1, pr2_1 = env['runbot_merge.pull_requests'].search([], order='number')
+ assert pr0_1 == pr0
+ assert pr1_1 == pr1
+
+ with prod:
+ prod.get_pr(pr1.number).close()
+
+ assert pr1_1.state == 'closed'
+ assert not pr1_1.parent_id
+ assert pr2_1.state == 'opened'
+
+def sPeNgBaB(s):
+ return ''.join(
+ l if i % 2 == 0 else l.upper()
+ for i, l in enumerate(s)
+ )
+def test_spengbab():
+ assert sPeNgBaB("spongebob") == 'sPoNgEbOb'
+
+class TestRecognizeCommands:
+ def make_pr(self, env, config, make_repo):
+ r, _ = make_basic(env, config, make_repo)
+
+ with r:
+ r.make_commits('c', Commit('p', tree={'x': '0'}), ref='heads/testbranch')
+ pr = r.make_pr(target='a', head='testbranch')
+
+ return r, pr, env['runbot_merge.pull_requests'].search([
+ ('repository.name', '=', r.name),
+ ('number', '=', pr.number),
+ ])
+
+ def test_botname_casing(self, env, config, make_repo):
+ """ Test that the botname is case-insensitive as people might write
+ bot names capitalised or titlecased or uppercased or whatever
+ """
+ repo, pr, pr_id = self.make_pr(env, config, make_repo)
+ assert pr_id.state == 'opened'
+ botname = env['runbot_merge.project'].search([]).fp_github_name
+ [a] = env['runbot_merge.branch'].search([
+ ('name', '=', 'a')
+ ])
+ [c] = env['runbot_merge.branch'].search([
+ ('name', '=', 'c')
+ ])
+
+ names = [
+ botname,
+ botname.upper(),
+ botname.capitalize(),
+ sPeNgBaB(botname),
+ ]
+
+ print([repr(o) for o in [a, c, pr_id, pr_id.limit_id]])
+ for n in names:
+ assert pr_id.limit_id == c
+ with repo:
+ pr.post_comment('@%s up to a' % n, config['role_reviewer']['token'])
+ assert pr_id.limit_id == a
+ # reset state
+ pr_id.write({'limit_id': c.id})
+
+ @pytest.mark.parametrize('indent', ['', '\N{SPACE}', '\N{SPACE}'*4, '\N{TAB}'])
+ def test_botname_indented(self, env, config, make_repo, indent):
+ """ matching botname should ignore leading whitespaces
+ """
+ repo, pr, pr_id = self.make_pr(env, config, make_repo)
+ assert pr_id.state == 'opened'
+ botname = env['runbot_merge.project'].search([]).fp_github_name
+ [a] = env['runbot_merge.branch'].search([
+ ('name', '=', 'a')
+ ])
+ [c] = env['runbot_merge.branch'].search([
+ ('name', '=', 'c')
+ ])
+
+ assert pr_id.limit_id == c
+ with repo:
+ pr.post_comment('%s@%s up to a' % (indent, botname), config['role_reviewer']['token'])
+ assert pr_id.limit_id == a
diff --git a/forwardport/tests/test_weird.py b/forwardport/tests/test_weird.py
new file mode 100644
index 00000000..7d851cb8
--- /dev/null
+++ b/forwardport/tests/test_weird.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+import sys
+
+import pytest
+import re
+
+from utils import *
+
+def make_basic(env, config, make_repo, *, fp_token, fp_remote):
+ """ Creates a basic repo with 3 forking branches
+
+ 0 -- 1 -- 2 -- 3 -- 4 : a
+           |
+           `-- 11 -- 22 : b
+                |
+                `-- 111 : c
+ each branch just adds and modifies a file (resp. f, g and h) through the
+ contents sequence a b c d e
+ """
+ Projects = env['runbot_merge.project']
+ project = Projects.search([('name', '=', 'myproject')])
+ if not project:
+ project = env['runbot_merge.project'].create({
+ 'name': 'myproject',
+ 'github_token': config['github']['token'],
+ 'github_prefix': 'hansen',
+ 'fp_github_token': fp_token and config['github']['token'],
+ 'required_statuses': 'legal/cla,ci/runbot',
+ 'branch_ids': [
+ (0, 0, {'name': 'a', 'fp_sequence': 2, 'fp_target': True}),
+ (0, 0, {'name': 'b', 'fp_sequence': 1, 'fp_target': True}),
+ (0, 0, {'name': 'c', 'fp_sequence': 0, 'fp_target': True}),
+ ],
+ })
+
+ prod = make_repo('proj')
+ with prod:
+ a_0, a_1, a_2, a_3, a_4, = prod.make_commits(
+ None,
+ Commit("0", tree={'f': 'a'}),
+ Commit("1", tree={'f': 'b'}),
+ Commit("2", tree={'f': 'c'}),
+ Commit("3", tree={'f': 'd'}),
+ Commit("4", tree={'f': 'e'}),
+ ref='heads/a',
+ )
+ b_1, b_2 = prod.make_commits(
+ a_2,
+ Commit('11', tree={'g': 'a'}),
+ Commit('22', tree={'g': 'b'}),
+ ref='heads/b',
+ )
+ prod.make_commits(
+ b_1,
+ Commit('111', tree={'h': 'a'}),
+ ref='heads/c',
+ )
+ other = prod.fork()
+ project.write({
+ 'repo_ids': [(0, 0, {
+ 'name': prod.name,
+ 'fp_remote_target': fp_remote and other.name,
+ })],
+ })
+
+ return project, prod, other
+
+def test_no_token(env, config, make_repo):
+ """ if there's no token on the repo, nothing should break though should
+ log
+ """
+ # create project configured with remotes on the repo but no token
+ proj, prod, _ = make_basic(env, config, make_repo, fp_token=False, fp_remote=True)
+
+ with prod:
+ prod.make_commits(
+ 'a', Commit('c0', tree={'a': '0'}), ref='heads/abranch'
+ )
+ pr = prod.make_pr(target='a', head='abranch')
+ prod.post_status(pr.head, 'success', 'legal/cla')
+ prod.post_status(pr.head, 'success', 'ci/runbot')
+ pr.post_comment('hansen r+', config['role_reviewer']['token'])
+
+ env.run_crons()
+ with prod:
+ prod.post_status('staging.a', 'success', 'legal/cla')
+ prod.post_status('staging.a', 'success', 'ci/runbot')
+
+ # wanted to use capfd, however it's not compatible with the subprocess
+ # being created beforehand, and having server() depend on capfd() would
+ # remove all its output from the normal pytest capture (dumped on test
+ # failure)
+ #
+ # so I'd really have to hand-roll the entire thing by having server()
+ # pipe stdout/stderr to temp files, yield those temp files, and have the
+ # tests mess around with reading those files, and finally have the server
+ # dump the file contents back to the test runner's stdout/stderr on
+ # fixture teardown...
+ env.run_crons()
+ assert len(env['runbot_merge.pull_requests'].search([], order='number')) == 1,\
+ "should not have created forward port"
+
+def test_remove_token(env, config, make_repo):
+ proj, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=True)
+ proj.fp_github_token = False
+
+ with prod:
+ prod.make_commits(
+ 'a', Commit('c0', tree={'a': '0'}), ref='heads/abranch'
+ )
+ pr = prod.make_pr(target='a', head='abranch')
+ prod.post_status(pr.head, 'success', 'legal/cla')
+ prod.post_status(pr.head, 'success', 'ci/runbot')
+ pr.post_comment('hansen r+', config['role_reviewer']['token'])
+
+ env.run_crons()
+ with prod:
+ prod.post_status('staging.a', 'success', 'legal/cla')
+ prod.post_status('staging.a', 'success', 'ci/runbot')
+
+ env.run_crons()
+ assert len(env['runbot_merge.pull_requests'].search([], order='number')) == 1,\
+ "should not have created forward port"
+
+def test_no_target(env, config, make_repo):
+ proj, prod, _ = make_basic(env, config, make_repo, fp_token=True, fp_remote=False)
+
+ with prod:
+ prod.make_commits(
+ 'a', Commit('c0', tree={'a': '0'}), ref='heads/abranch'
+ )
+ pr = prod.make_pr(target='a', head='abranch')
+ prod.post_status(pr.head, 'success', 'legal/cla')
+ prod.post_status(pr.head, 'success', 'ci/runbot')
+ pr.post_comment('hansen r+', config['role_reviewer']['token'])
+
+ env.run_crons()
+ with prod:
+ prod.post_status('staging.a', 'success', 'legal/cla')
+ prod.post_status('staging.a', 'success', 'ci/runbot')
+
+ env.run_crons()
+ assert len(env['runbot_merge.pull_requests'].search([], order='number')) == 1,\
+ "should not have created forward port"
diff --git a/forwardport/tests/utils.py b/forwardport/tests/utils.py
new file mode 100644
index 00000000..857f42e8
--- /dev/null
+++ b/forwardport/tests/utils.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+# target branch '-' source branch '-' base32 unique '-forwardport'
+import itertools
+
+MESSAGE_TEMPLATE = """{message}
+
+closes {repo}#{number}
+
+{headers}Signed-off-by: {name} <{login}@users.noreply.github.com>"""
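+# rendered with illustrative values (message='fix', repo='owner/proj',
+# number=42, headers='X-original-commit: 0123abcd\n', name='Jane Doe',
+# login='jdoe') this yields:
+#
+#   fix
+#
+#   closes owner/proj#42
+#
+#   X-original-commit: 0123abcd
+#   Signed-off-by: Jane Doe <jdoe@users.noreply.github.com>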
+REF_PATTERN = r'{target}-{source}-\w{{8}}-forwardport'
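+# e.g. 'b-hugechange-a1b2c3d4-forwardport' (unique part illustrative)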
+
+class Commit:
+ def __init__(self, message, *, author=None, committer=None, tree):
+ self.id = None
+ self.message = message
+ self.author = author
+ self.committer = committer
+ self.tree = tree
+
+def validate_all(repos, refs, contexts=('ci/runbot', 'legal/cla')):
+ """ Post a "success" status for each context on each ref of each repo
+ """
+ for repo, branch, context in itertools.product(repos, refs, contexts):
+ repo.post_status(branch, 'success', context)
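+
+# illustrative use, mirroring the tests above:
+# validate_all([prod], ['staging.a']) posts a 'success' status for both
+# default contexts on prod's staging.a ref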
diff --git a/runbot_merge/controllers/__init__.py b/runbot_merge/controllers/__init__.py
index 2b916bfd..40975dbc 100644
--- a/runbot_merge/controllers/__init__.py
+++ b/runbot_merge/controllers/__init__.py
@@ -163,40 +163,12 @@ def handle_pr(env, event):
# don't mark merged PRs as closed (!!!)
if event['action'] == 'closed' and pr_obj.state != 'merged':
- # ignore if the PR is already being updated in a separate transaction
- # (most likely being merged?)
- env.cr.execute('''
- SELECT id, state FROM runbot_merge_pull_requests
- WHERE id = %s AND state != 'merged'
- FOR UPDATE SKIP LOCKED;
- ''', [pr_obj.id])
- res = env.cr.fetchone()
# FIXME: store some sort of "try to close it later" if the merge fails?
- if not res:
+ if pr_obj._try_closing(event['sender']['login']):
+ return 'Closed {}'.format(pr_obj.id)
+ else:
return 'Ignored: could not lock rows (probably being merged)'
- env.cr.execute('''
- UPDATE runbot_merge_pull_requests
- SET state = 'closed'
- WHERE id = %s AND state != 'merged'
- ''', [pr_obj.id])
- env.cr.commit()
- pr_obj.invalidate_cache(fnames=['state'], ids=[pr_obj.id])
- if env.cr.rowcount:
- env['runbot_merge.pull_requests.tagging'].create({
- 'pull_request': pr_obj.number,
- 'repository': repo.id,
- 'state_from': res[1] if not pr_obj.staging_id else 'staged',
- 'state_to': 'closed',
- })
- pr_obj.unstage(
- "PR %s:%s closed by %s",
- pr_obj.repository.name, pr_obj.number,
- event['sender']['login']
- )
-
- return 'Closed {}'.format(pr_obj.id)
-
if event['action'] == 'reopened' and pr_obj.state == 'closed':
pr_obj.write({
'state': 'opened',
diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py
index 96f4c7bd..8bff67db 100644
--- a/runbot_merge/models/pull_requests.py
+++ b/runbot_merge/models/pull_requests.py
@@ -1,4 +1,5 @@
import base64
+import collections
import datetime
import io
import json
@@ -6,6 +7,7 @@ import logging
import os
import pprint
import re
+import sys
import time
from itertools import takewhile
@@ -433,7 +435,10 @@ class Branch(models.Model):
'state_to': 'staged',
})
- logger.info("Created staging %s (%s)", st, staged)
+ logger.info("Created staging %s (%s) to %s", st, ', '.join(
+ '%s[%s]' % (batch, batch.prs)
+ for batch in staged
+ ), st.target.name)
return st
def _check_visibility(self, repo, branch_name, expected_head, token):
@@ -453,6 +458,7 @@ class Branch(models.Model):
return head == expected_head
return False
+ACL = collections.namedtuple('ACL', 'is_admin is_reviewer is_author')
class PullRequests(models.Model):
_name = 'runbot_merge.pull_requests'
_order = 'number desc'
@@ -533,7 +539,7 @@ class PullRequests(models.Model):
else:
separator = 's '
return '<pull_request%s%s>' % (separator, ' '.join(
- '%s:%s' % (p.repository.name, p.number)
+ '{0.id} ({0.repository.name}:{0.number})'.format(p)
for p in self
))
@@ -653,10 +659,7 @@ class PullRequests(models.Model):
(login, name) = (author.github_login, author.display_name) if author else (login, 'not in system')
- is_admin = (author.reviewer and self.author != author) or (author.self_reviewer and self.author == author)
- is_reviewer = is_admin or self in author.delegate_reviewer
- # TODO: should delegate reviewers be able to retry PRs?
- is_author = is_reviewer or self.author == author
+ is_admin, is_reviewer, is_author = self._pr_acl(author)
commands = dict(
ps
@@ -709,7 +712,6 @@ class PullRequests(models.Model):
elif command == 'review':
if param and is_reviewer:
newstate = RPLUS.get(self.state)
- print('r+', self.state, self.status)
if newstate:
self.state = newstate
self.reviewed_by = author
@@ -794,6 +796,16 @@ class PullRequests(models.Model):
})
return '\n'.join(msg)
+ def _pr_acl(self, user):
+ if not self:
+ return ACL(False, False, False)
+
+ is_admin = (user.reviewer and self.author != user) or (user.self_reviewer and self.author == user)
+ is_reviewer = is_admin or self in user.delegate_reviewer
+ # TODO: should delegate reviewers be able to retry PRs?
+ is_author = is_reviewer or self.author == user
+ return ACL(is_admin, is_reviewer, is_author)
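+ # e.g. a delegate who is also the PR's author (and not flagged as
+ # self_reviewer) gets ACL(is_admin=False, is_reviewer=True,
+ # is_author=True): delegation grants review but not admin rights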
+
def _validate(self, statuses):
# could have two PRs (e.g. one open and one closed) at least
# temporarily on the same head, or on the same head with different
@@ -1108,6 +1120,39 @@ class PullRequests(models.Model):
self.staging_id.cancel(reason, *args)
+ def _try_closing(self, by):
+ # ignore if the PR is already being updated in a separate transaction
+ # (most likely being merged?)
+ self.env.cr.execute('''
+ SELECT id, state FROM runbot_merge_pull_requests
+ WHERE id = %s AND state != 'merged'
+ FOR UPDATE SKIP LOCKED;
+ ''', [self.id])
+ res = self.env.cr.fetchone()
+ if not res:
+ return False
+
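+ # lock acquired: flip the state with raw SQL (bypassing the ORM), make
+ # the change visible to concurrent transactions via the commit, then
+ # drop the stale cached value so `state` is re-read from the database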
+ self.env.cr.execute('''
+ UPDATE runbot_merge_pull_requests
+ SET state = 'closed'
+ WHERE id = %s AND state != 'merged'
+ ''', [self.id])
+ self.env.cr.commit()
+ self.invalidate_cache(fnames=['state'], ids=[self.id])
+ if self.env.cr.rowcount:
+ self.env['runbot_merge.pull_requests.tagging'].create({
+ 'pull_request': self.number,
+ 'repository': self.repository.id,
+ 'state_from': res[1] if not self.staging_id else 'staged',
+ 'state_to': 'closed',
+ })
+ self.unstage(
+ "PR %s:%s closed by %s",
+ self.repository.name, self.number,
+ by
+ )
+ return True
+
# state changes on reviews
RPLUS = {
'opened': 'approved',
diff --git a/runbot_merge/tests/fake_github/git.py b/runbot_merge/tests/fake_github/git.py
index 756c5d1f..585f739b 100644
--- a/runbot_merge/tests/fake_github/git.py
+++ b/runbot_merge/tests/fake_github/git.py
@@ -119,7 +119,7 @@ def read_object(store, tid):
# recursively reads tree of objects
o = store[tid]
if isinstance(o, bytes):
- return o
+ return o.decode()
return {
k: read_object(store, v)
for k, v in o.items()
diff --git a/runbot_merge/tests/remote.py b/runbot_merge/tests/remote.py
index 28c74dee..f5ea4f73 100644
--- a/runbot_merge/tests/remote.py
+++ b/runbot_merge/tests/remote.py
@@ -534,7 +534,11 @@ class Repo:
)
def read_tree(self, commit):
- # read tree object
+ """ read tree object from commit
+
+ :param Commit commit:
+ :rtype: Dict[str, str]
+ """
r = self._session.get('https://api.github.com/repos/{}/git/trees/{}'.format(self.name, commit.tree))
assert 200 <= r.status_code < 300, r.json()
@@ -542,9 +546,10 @@ class Repo:
tree = {}
for t in r.json()['tree']:
assert t['type'] == 'blob', "we're *not* doing recursive trees in test cases"
- r = self._session.get('https://api.github.com/repos/{}/git/blobs/{}'.format(self.name, t['sha']))
+ r = self._session.get(t['url'])
assert 200 <= r.status_code < 300, r.json()
- tree[t['path']] = base64.b64decode(r.json()['content'])
+ # assume all test content is textual
+ tree[t['path']] = base64.b64decode(r.json()['content']).decode()
return tree
diff --git a/runbot_merge/tests/test_basic.py b/runbot_merge/tests/test_basic.py
index 7229f3be..6de13bb3 100644
--- a/runbot_merge/tests/test_basic.py
+++ b/runbot_merge/tests/test_basic.py
@@ -91,8 +91,8 @@ def test_trivial_flow(env, repo, page, users):
# with default-rebase, only one parent is "known"
assert master.parents[0] == m
assert repo.read_tree(master) == {
- 'a': b'some other content',
- 'b': b'a second file',
+ 'a': 'some other content',
+ 'b': 'a second file',
}
assert master.message == "gibberish\n\nblahblah\n\ncloses {repo.name}#1"\
"\n\nSigned-off-by: {reviewer.formatted_email}"\
@@ -803,8 +803,8 @@ def test_rebase_failure(env, repo, users, remote_p):
(users['reviewer'], 'hansen r+'),
]
assert repo.read_tree(repo.commit('heads/staging.master')) == {
- 'm': b'm',
- 'b': b'b',
+ 'm': 'm',
+ 'b': 'b',
}
def test_ci_failure_after_review(env, repo, users):
@@ -1010,7 +1010,7 @@ class TestMergeMethod:
"the pr head should not be an ancestor of the staging branch in a squash merge"
assert re.match('^force rebuild', staging.message)
assert repo.read_tree(staging) == {
- 'm': b'c1', 'm2': b'm2',
+ 'm': 'c1', 'm2': 'm2',
}, "the tree should still be correctly merged"
[actual_sha] = staging.parents
actual = repo.commit(actual_sha)
@@ -1218,7 +1218,7 @@ class TestMergeMethod:
assert master == merge_head
head = repo.commit('heads/master')
final_tree = repo.read_tree(head)
- assert final_tree == {'m': b'2', 'b': b'1'}, "sanity check of final tree"
+ assert final_tree == {'m': '2', 'b': '1'}, "sanity check of final tree"
r1 = repo.commit(head.parents[1])
r0 = repo.commit(r1.parents[0])
assert json.loads(pr.commits_map) == {
@@ -1291,7 +1291,7 @@ class TestMergeMethod:
assert master == nb1
head = repo.commit('heads/master')
final_tree = repo.read_tree(head)
- assert final_tree == {'m': b'2', 'b': b'1'}, "sanity check of final tree"
+ assert final_tree == {'m': '2', 'b': '1'}, "sanity check of final tree"
m1 = head
m0 = repo.commit(m1.parents[0])
@@ -1455,7 +1455,7 @@ class TestMergeMethod:
master = repo.commit('heads/master')
assert master.parents == [m2, c1]
- assert repo.read_tree(master) == {'a': b'1', 'b': b'2', 'bb': b'bb'}
+ assert repo.read_tree(master) == {'a': '1', 'b': '2', 'bb': 'bb'}
m1 = node('M1')
reviewer = get_partner(env, users["reviewer"]).formatted_email
@@ -1495,7 +1495,7 @@ class TestMergeMethod:
assert staging.parents == [m2],\
"the previous master's tip should be the sole parent of the staging commit"
assert repo.read_tree(staging) == {
- 'm': b'c2', 'm2': b'm2',
+ 'm': 'c2', 'm2': 'm2',
}, "the tree should still be correctly merged"
repo.post_status(staging.id, 'success', 'legal/cla')
@@ -1533,7 +1533,7 @@ class TestMergeMethod:
assert repo.is_ancestor(prx.head, of=staging.id)
assert staging.parents == [m2, c1]
assert repo.read_tree(staging) == {
- 'm': b'c1', 'm2': b'm2',
+ 'm': 'c1', 'm2': 'm2',
}
repo.post_status(staging.id, 'success', 'legal/cla')