2023-07-05 20:11:40 +07:00
|
|
|
import collections
|
|
|
|
import datetime
|
2021-10-20 19:28:29 +07:00
|
|
|
import pathlib
|
|
|
|
|
2023-07-05 20:11:40 +07:00
|
|
|
import werkzeug.urls
|
|
|
|
|
|
|
|
from odoo.http import route, request
|
|
|
|
from odoo.osv import expression
|
2021-10-20 19:28:29 +07:00
|
|
|
from odoo.addons.runbot_merge.controllers.dashboard import MergebotDashboard
|
|
|
|
|
2023-07-05 20:11:40 +07:00
|
|
|
# Forward ports only count as "outstanding" once they are older than this.
DEFAULT_DELTA = datetime.timedelta(weeks=1)
|
2021-10-20 19:28:29 +07:00
|
|
|
class Dashboard(MergebotDashboard):
    """Forward-port extension of the mergebot dashboard.

    Adds this module's changelog entries to the dashboard, and serves a page
    listing "outstanding" forward-port PRs (ports created more than
    ``DEFAULT_DELTA`` ago which are still neither merged nor closed).
    """

    def _entries(self):
        """Extend the parent changelog with this module's entries.

        Each subdirectory of the local ``changelog`` directory yields one
        ``(directory_name, [file_contents, ...])`` tuple appended to the
        parent's entries. If the directory does not exist, the parent
        entries are returned unchanged.
        """
        changelog = pathlib.Path(__file__).parent / 'changelog'
        if not changelog.is_dir():
            return super()._entries()

        return super()._entries() + [
            (d.name, [f.read_text(encoding='utf-8') for f in d.iterdir() if f.is_file()])
            for d in changelog.iterdir()
        ]

    @route('/forwardport/outstanding', type='http', methods=['GET'], auth="user", website=True, sitemap=False)
    def outstanding(self, partner=0, authors=True, reviewers=True, group=0):
        """Render the "outstanding forward ports" page.

        :param partner: id of a single partner to filter on (0 = no filter)
        :param authors: truthy to attribute/filter outstanding ports by the
                        source PR's author
        :param reviewers: truthy to attribute/filter outstanding ports by the
                          source PR's reviewer
        :param group: id of a company ("team") to filter on via the
                      partners' commercial partner (ignored if ``partner``
                      is set)
        """
        Partners = request.env['res.partner']
        PullRequests = request.env['runbot_merge.pull_requests']

        partner = Partners.browse(int(partner))
        group = Partners.browse(int(group))
        # query-string values arrive as strings; normalise to ints so they
        # act both as boolean flags and as re-emittable link parameters
        authors = int(authors)
        reviewers = int(reviewers)

        def link(**kw):
            # Rebuild a query string preserving the current filters, with
            # ``kw`` overriding any of them (was a lambda; def per PEP 8).
            return '?' + werkzeug.urls.url_encode({
                'partner': partner.id or 0,
                'authors': authors,
                'reviewers': reviewers,
                **kw,
            })

        groups = Partners.search([('is_company', '=', True), ('child_ids', '!=', False)])
        if not (authors or reviewers):
            # nothing to attribute ports to: render an empty page, keeping
            # the filter widgets functional
            return request.render('forwardport.outstanding', {
                'authors': 0,
                'reviewers': 0,
                'single': partner,
                'culprits': partner,
                'groups': groups,
                'current_group': group,
                'outstanding': [],
                'outstanding_per_author': {partner: 0},
                'outstanding_per_reviewer': {partner: 0},
                'link': link,
            })

        partner_filter = []
        if partner or group:
            if partner:
                suffix = ''
                arg = partner.id
            else:
                # match any member of the selected company
                suffix = '.commercial_partner_id'
                arg = group.id

            if authors:
                partner_filter.append([(f'source_id.author{suffix}', '=', arg)])
            if reviewers:
                partner_filter.append([(f'source_id.reviewed_by{suffix}', '=', arg)])

        # NOTE(review): ``create_date`` is stored naive-UTC by Odoo while
        # ``now()`` is naive server-local time — this assumes the server
        # clock runs UTC; confirm before relying on the cutoff being exact.
        now = datetime.datetime.now()
        domain = [
            ('source_id', '!=', False),
            ('blocked', '!=', False),
            ('state', 'in', ['opened', 'validated', 'approved', 'ready', 'error']),
            ('create_date', '<', now - DEFAULT_DELTA),
        ]
        if partner_filter:
            # clearer than splatting a possibly-empty OR into the literal
            domain.extend(expression.OR(partner_filter))
        outstanding = PullRequests.search(domain)

        outstanding_per_group = collections.Counter()
        outstanding_per_author = collections.Counter()
        outstanding_per_reviewer = collections.Counter()
        outstandings = []
        # group the outstanding ports by their source PR, oldest merge first
        # (unmerged sources sort as "now", i.e. last)
        for source in outstanding.mapped('source_id').sorted(lambda s: s.merge_date or now):
            prs = source.forwardport_ids.filtered(lambda p: p.state not in ['merged', 'closed'])
            outstandings.append({
                'source': source,
                'prs': prs,
            })
            if authors:
                outstanding_per_author[source.author] += len(prs)
            if reviewers:
                outstanding_per_reviewer[source.reviewed_by] += len(prs)

            # if both the source and reviewer have the same team, don't count
            # the PRs twice (the recordset union de-duplicates the teams)
            for team in source.author.commercial_partner_id | source.reviewed_by.commercial_partner_id:
                outstanding_per_group[team] += len(prs)

        # partners ordered by the total number of outstanding ports they are
        # responsible for, as author and/or reviewer
        culprits = Partners.browse(p.id for p, _ in (outstanding_per_reviewer + outstanding_per_author).most_common())
        return request.render('forwardport.outstanding', {
            'authors': authors,
            'reviewers': reviewers,
            'single': partner,
            'culprits': culprits,
            'groups': groups,
            'current_group': group,
            'outstanding_per_author': outstanding_per_author,
            'outstanding_per_reviewer': outstanding_per_reviewer,
            'outstanding_per_group': outstanding_per_group,
            'outstanding': outstandings,
            'link': link,
        })
|