From 56e242a660fa53d2bf340c9b1b63423fa766821e Mon Sep 17 00:00:00 2001 From: Xavier-Do Date: Wed, 25 Sep 2024 10:29:27 +0200 Subject: [PATCH] [IMP] runbot: refactor build error models The initial idea to link an error to another one was a quick solution to group them if they where related, but this became challenging to copute metada regarding errors. - The displayed error message was not always consistent with the real root cause/the error that lead here. - The aggregates (lets says, linked buils ids) could be the one of the error, or from all error messages. Same for the versions, first seen, .. This is confusing to knwo what is the leist we are managing and what is the expecte result to display Main motivation: on a standard error page (will be changed to "assignment"), we want to have the list of error message that is related to this one. We want to know for each message (a real build error) what is the version, first seen, ... This will give more flexibility on the display, The assigned person/team/test-tags, ... are moved to this model The appearance data remains on the build error but are aggregate on the assignation. --- runbot/__manifest__.py | 2 +- runbot/controllers/frontend.py | 10 +- runbot/data/error_link.xml | 10 + runbot/migrations/17.0.5.8/post-migration.py | 123 ++++ runbot/migrations/17.0.5.8/pre-migration.py | 4 + runbot/models/batch.py | 6 +- runbot/models/build.py | 5 +- runbot/models/build_error.py | 634 +++++++++++-------- runbot/models/commit.py | 1 - runbot/models/ir_logging.py | 15 +- runbot/models/team.py | 2 +- runbot/security/ir.model.access.csv | 2 + runbot/static/src/js/fields/fields.js | 35 +- runbot/templates/build.xml | 25 +- runbot/templates/build_error.xml | 12 +- runbot/tests/test_build_error.py | 298 ++++++--- runbot/views/build_error_views.xml | 245 +++++-- runbot/views/dashboard_views.xml | 6 +- runbot/views/menus.xml | 5 +- runbot_populate/models/runbot.py | 24 +- 20 files changed, 974 insertions(+), 490 deletions(-) create mode 100644 runbot/migrations/17.0.5.8/post-migration.py create mode 100644 runbot/migrations/17.0.5.8/pre-migration.py diff --git a/runbot/__manifest__.py b/runbot/__manifest__.py index 8cec3007..eab898c6 100644 --- a/runbot/__manifest__.py +++ b/runbot/__manifest__.py @@ -6,7 +6,7 @@ 'author': "Odoo SA", 'website': "http://runbot.odoo.com", 'category': 'Website', - 'version': '5.7', + 'version': '5.8', 'application': True, 'depends': ['base', 'base_automation', 'website'], 'data': [ diff --git a/runbot/controllers/frontend.py b/runbot/controllers/frontend.py index 5857c062..90ce4bc1 100644 --- a/runbot/controllers/frontend.py +++ b/runbot/controllers/frontend.py @@ -30,7 +30,7 @@ def route(routes, **kw): keep_search = request.httprequest.cookies.get('keep_search', False) == '1' cookie_search = request.httprequest.cookies.get('search', '') refresh = kwargs.get('refresh', False) - nb_build_errors = request.env['runbot.build.error'].search_count([('random', '=', True), ('parent_id', '=', False)]) + nb_build_errors = request.env['runbot.build.error'].search_count([]) nb_assigned_errors = request.env['runbot.build.error'].search_count([('responsible', '=', request.env.user.id)]) nb_team_errors = request.env['runbot.build.error'].search_count([('responsible', '=', False), ('team_id', 'in', request.env.user.runbot_team_ids.ids)]) kwargs['more'] = more @@ -459,7 +459,7 @@ class Runbot(Controller): ('responsible', '=', False), ('team_id', 'in', request.env.user.runbot_team_ids.ids) ], order='last_seen_date desc, build_count desc') - 
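# Illustrative sketch (not from the patch itself): how the model split described in the
# commit message is expected to fit together, using the model and field names defined
# later in runbot/models/build_error.py. `env` and `build` are assumed to be an Odoo
# environment and an existing runbot.build record.
#
#   error = env['runbot.build.error'].create({'name': 'Some failing test'})
#   content = env['runbot.build.error.content'].create({
#       'content': 'AssertionError: ...',   # one fingerprinted error message
#       'error_id': error.id,               # grouped under the "assignment" error
#   })
#   env['runbot.build.error.link'].create({
#       'build_id': build.id,               # build where the message was seen
#       'error_content_id': content.id,
#       'log_date': fields.Datetime.now(),
#   })
#
# Assignment data (responsible, team_id, test_tags, ...) lives on runbot.build.error,
# while appearance data (builds, versions, first/last seen) is aggregated from its
# runbot.build.error.content records.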
domain = [('parent_id', '=', False), ('responsible', '!=', request.env.user.id), ('build_count', '>', 1)] + domain = [('responsible', '!=', request.env.user.id), ('build_count', '>', 1)] build_errors_count = request.env['runbot.build.error'].search_count(domain) url_args = {} url_args['sort'] = sort @@ -481,7 +481,7 @@ class Runbot(Controller): @route(['/runbot/teams', '/runbot/teams/',], type='http', auth='user', website=True, sitemap=False) def team_dashboards(self, team=None, hide_empty=False, **kwargs): teams = request.env['runbot.team'].search([]) if not team else None - domain = [('id', 'in', team.build_error_ids.ids)] if team else [] + domain = [('id', 'in', team.assignment_ids.ids)] if team else [] # Sort & Filter sortby = kwargs.get('sortby', 'count') @@ -496,7 +496,7 @@ class Runbot(Controller): 'not_one': {'label': 'Seen more than once', 'domain': [('build_count', '>', 1)]}, } - for trigger in team.build_error_ids.trigger_ids if team else []: + for trigger in team.assignment_ids.trigger_ids if team else []: k = f'trigger_{trigger.name.lower().replace(" ", "_")}' searchbar_filters.update( {k: {'label': f'Trigger {trigger.name}', 'domain': [('trigger_ids', '=', trigger.id)]}} @@ -510,7 +510,7 @@ class Runbot(Controller): qctx = { 'team': team, 'teams': teams, - 'build_error_ids': request.env['runbot.build.error'].search(domain, order=order), + 'build_assignment_ids': request.env['runbot.build.assignment'].search(domain, order=order), 'hide_empty': bool(hide_empty), 'searchbar_sortings': searchbar_sortings, 'sortby': sortby, diff --git a/runbot/data/error_link.xml b/runbot/data/error_link.xml index bc5ade63..1f148d78 100644 --- a/runbot/data/error_link.xml +++ b/runbot/data/error_link.xml @@ -9,6 +9,16 @@ records.action_link_errors() + + Link build errors contents + + + ir.actions.server + code + + records.action_link_errors_content() + + Re-clean build errors diff --git a/runbot/migrations/17.0.5.8/post-migration.py b/runbot/migrations/17.0.5.8/post-migration.py new file mode 100644 index 00000000..d65051f3 --- /dev/null +++ b/runbot/migrations/17.0.5.8/post-migration.py @@ -0,0 +1,123 @@ +import logging + +_logger = logging.getLogger(__name__) + + +def migrate(cr, version): + + # get seen infos + cr.execute("SELECT error_content_id, min(build_id), min(log_date), max(build_id), max(log_date), count(DISTINCT build_id) FROM runbot_build_error_link GROUP BY error_content_id") + vals_by_error = {error: vals for error, *vals in cr.fetchall()} + + # first_seen_build_id was not stored, lets fill it and update all values for good mesure + for error, vals in vals_by_error.items(): + cr.execute('UPDATE runbot_build_error_content SET first_seen_build_id = %s, first_seen_date = %s, last_seen_build_id = %s, last_seen_date = %s WHERE id=%s', (vals[0], vals[1], vals[2], vals[3], error)) + + # generate flattened error hierarchy + cr.execute('''SELECT + id, + parent_id + FROM runbot_build_error_content + ORDER BY id + ''') + + error_by_parent = {} + for error_id, parent_id in cr.fetchall(): + if parent_id: + error_by_parent.setdefault(parent_id, []).append(error_id) + stable = False + while not stable: + stable = True + for parent, child_ids in error_by_parent.items(): + for child_id in child_ids: + if parent == child_id: + continue + sub_childrens = error_by_parent.get(child_id) + if sub_childrens: + error_by_parent[parent] = error_by_parent[parent] + sub_childrens + error_by_parent[child_id] = [] + stable = False + for parent, child_ids in error_by_parent.items(): + if parent in child_ids: + 
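# Illustrative example (assumed data, not from the patch): starting from
# error_by_parent = {1: [2], 2: [3]}, the loop above folds the grand-child in,
# giving {1: [2, 3], 2: []}. A cycle - an error that ends up among its own
# children, e.g. {4: [4]} - is what this branch detects, logs and breaks.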
_logger.info('Breaking cycle parent on %s', parent) + error_by_parent[parent] = [c for c in child_ids if c != parent] + cr.execute('UPDATE runbot_build_error_content SET parent_id = null WHERE id=%s', (parent,)) + error_by_parent = {parent: chilren for parent, chilren in error_by_parent.items() if chilren} + + cr.execute('''SELECT + id, + active, + parent_id + random, + content, + test_tags, + tags_min_version_id, + tags_max_version_id, + team_id, + responsible, + customer, + fixing_commit, + fixing_pr_id + FROM runbot_build_error_content + WHERE parent_id IS null + ORDER BY id + ''') + errors = cr.fetchall() + nb_groups = len(error_by_parent) + _logger.info('Creating %s errors', nb_groups) + for error in errors: + error_id, *values = error + children = error_by_parent.get(error_id, []) + assert not error_id in children + all_errors = [error_id, *children] + error_count = len(all_errors) + + # vals_by_error order: min(build_id), min(log_date), max(build_id), max(log_date) + build_count = 0 + first_seen_build_id = first_seen_date = last_seen_build_id = last_seen_date = None + if error_id in vals_by_error: + error_vals = [vals_by_error[error_id] for error_id in all_errors] + first_seen_build_id = min(vals[0] for vals in error_vals) + first_seen_date = min(vals[1] for vals in error_vals) + last_seen_build_id = max(vals[2] for vals in error_vals) + last_seen_date = max(vals[3] for vals in error_vals) + build_count = sum(vals[4] for vals in error_vals) # not correct for distinct but close enough + assert first_seen_date <= last_seen_date + assert first_seen_build_id <= last_seen_build_id + name = values[2].split('\n')[0] + + values = [error_id, *values, last_seen_build_id, first_seen_build_id, last_seen_date, first_seen_date, build_count, error_count, name] + + cr.execute(''' + INSERT INTO runbot_build_error ( + id, + active, + random, + description, + test_tags, + tags_min_version_id, + tags_max_version_id, + team_id, + responsible, + customer, + fixing_commit, + fixing_pr_id, + last_seen_build_id, + first_seen_build_id, + last_seen_date, + first_seen_date, + build_count, + error_count, + name + ) + VALUES (%s) + RETURNING id + ''' % ', '.join(['%s'] * len(values)), values) # noqa: S608 + + error_id = cr.fetchone() + cr.execute('UPDATE runbot_build_error_content SET error_id = %s WHERE id in %s', (error_id, tuple(all_errors))) + + cr.execute('ALTER TABLE runbot_build_error_content ALTER COLUMN error_id SET NOT NULL') + cr.execute('SELECT max(id) from runbot_build_error') + cr.execute("SELECT SETVAL('runbot_build_error_id_seq', %s)", (cr.fetchone()[0] + 1,)) + _logger.info('Done') diff --git a/runbot/migrations/17.0.5.8/pre-migration.py b/runbot/migrations/17.0.5.8/pre-migration.py new file mode 100644 index 00000000..fb550ef4 --- /dev/null +++ b/runbot/migrations/17.0.5.8/pre-migration.py @@ -0,0 +1,4 @@ +def migrate(cr, version): + cr.execute('ALTER TABLE runbot_build_error RENAME TO runbot_build_error_content') + cr.execute('ALTER TABLE runbot_build_error_content ADD COLUMN first_seen_build_id INT') + cr.execute('ALTER TABLE runbot_build_error_link RENAME COLUMN build_error_id TO error_content_id') diff --git a/runbot/models/batch.py b/runbot/models/batch.py index f34deb2c..5df699d5 100644 --- a/runbot/models/batch.py +++ b/runbot/models/batch.py @@ -237,8 +237,9 @@ class Batch(models.Model): # use last not preparing batch to define previous repos_heads instead of branches heads: # Will allow to have a diff info on base bundle, compare with previous bundle last_base_batch = 
self.env['runbot.batch'].search([('bundle_id', '=', bundle.base_id.id), ('state', '!=', 'preparing'), ('category_id', '=', self.category_id.id), ('id', '!=', self.id)], order='id desc', limit=1) - base_head_per_repo = {commit.repo_id.id: commit for commit in last_base_batch.commit_ids} - self._update_commits_infos(base_head_per_repo) # set base_commit, diff infos, ... + if last_base_batch: + base_head_per_repo = {commit.repo_id.id: commit for commit in last_base_batch.commit_ids} + self._update_commits_infos(base_head_per_repo) # set base_commit, diff infos, ... # 2. FIND missing commit in a compatible base bundle if bundle.is_base or auto_rebase: @@ -496,7 +497,6 @@ class BatchSlot(models.Model): _description = 'Link between a bundle batch and a build' _order = 'trigger_id,id' - batch_id = fields.Many2one('runbot.batch', index=True) trigger_id = fields.Many2one('runbot.trigger', index=True) build_id = fields.Many2one('runbot.build', index=True) diff --git a/runbot/models/build.py b/runbot/models/build.py index 159f2b30..1fbe3ccf 100644 --- a/runbot/models/build.py +++ b/runbot/models/build.py @@ -316,7 +316,7 @@ class BuildResult(models.Model): @api.depends('build_error_link_ids') def _compute_build_error_ids(self): for record in self: - record.build_error_ids = record.build_error_link_ids.mapped('build_error_id') + record.build_error_ids = record.build_error_link_ids.error_content_id.error_id def _get_worst_result(self, results, max_res=False): results = [result for result in results if result] # filter Falsy values @@ -1182,11 +1182,10 @@ class BuildResult(models.Model): def _parse_logs(self): """ Parse build logs to classify errors """ - BuildError = self.env['runbot.build.error'] # only parse logs from builds in error and not already scanned builds_to_scan = self.search([('id', 'in', self.ids), ('local_result', 'in', ('ko', 'killed', 'warn')), ('build_error_link_ids', '=', False)]) ir_logs = self.env['ir.logging'].search([('level', 'in', ('ERROR', 'WARNING', 'CRITICAL')), ('type', '=', 'server'), ('build_id', 'in', builds_to_scan.ids)]) - return BuildError._parse_logs(ir_logs) + return self.env['runbot.build.error']._parse_logs(ir_logs) def _is_file(self, file, mode='r'): file_path = self._path(file) diff --git a/runbot/models/build_error.py b/runbot/models/build_error.py index ba370a91..d3acf2e4 100644 --- a/runbot/models/build_error.py +++ b/runbot/models/build_error.py @@ -19,7 +19,7 @@ class BuildErrorLink(models.Model): _order = 'log_date desc, build_id desc' build_id = fields.Many2one('runbot.build', required=True, index=True) - build_error_id =fields.Many2one('runbot.build.error', required=True, index=True, ondelete='cascade') + error_content_id = fields.Many2one('runbot.build.error.content', required=True, index=True, ondelete='cascade') log_date = fields.Datetime(string='Log date') host = fields.Char(related='build_id.host') dest = fields.Char(related='build_id.dest') @@ -29,26 +29,58 @@ class BuildErrorLink(models.Model): build_url = fields.Char(related='build_id.build_url') _sql_constraints = [ - ('error_build_rel_unique', 'UNIQUE (build_id, build_error_id)', 'A link between a build and an error must be unique'), + ('error_build_rel_unique', 'UNIQUE (build_id, error_content_id)', 'A link between a build and an error must be unique'), ] +class BuildErrorSeenMixin(models.AbstractModel): + _name = 'runbot.build.error.seen.mixin' + _description = "Add last/firt build/log_date for error and asssignments" + + first_seen_build_id = fields.Many2one('runbot.build', 
compute='_compute_seen', string='First Seen build', store=True) + first_seen_date = fields.Datetime(string='First Seen Date', compute='_compute_seen', store=True) + last_seen_build_id = fields.Many2one('runbot.build', compute='_compute_seen', string='Last Seen build', store=True) + last_seen_date = fields.Datetime(string='Last Seen Date', compute='_compute_seen', store=True) + build_count = fields.Integer(string='Nb Seen', compute='_compute_seen', store=True) + + @api.depends('build_error_link_ids') + def _compute_seen(self): + for record in self: + record.first_seen_date = False + record.last_seen_date = False + record.build_count = 0 + error_link_ids = record.build_error_link_ids.sorted('log_date') + if error_link_ids: + first_error_link = error_link_ids[0] + last_error_link = error_link_ids[-1] + record.first_seen_date = first_error_link.log_date + record.last_seen_date = last_error_link.log_date + record.first_seen_build_id = first_error_link.build_id + record.last_seen_build_id = last_error_link.build_id + record.build_count = len(error_link_ids.build_id) + + +def _compute_related_error_content_ids(field_name): + @api.depends(f'error_content_ids.{field_name}') + def _compute(self): + for record in self: + record[field_name] = record.error_content_ids[field_name] + return _compute + + class BuildError(models.Model): - _name = "runbot.build.error" - _description = "Build error" + _description = "An object to manage a group of errors log that fit together and assign them to a team" + _inherit = ('mail.thread', 'mail.activity.mixin', 'runbot.build.error.seen.mixin') - _inherit = ['mail.thread', 'mail.activity.mixin'] - _rec_name = "id" + name = fields.Char("Name") + active = fields.Boolean('Open (not fixed)', default=True, tracking=True) + description = fields.Text("Description", store=True, compute='_compute_description') + content = fields.Text("Error contents", compute='_compute_content', search="_search_content") + error_content_ids = fields.One2many('runbot.build.error.content', 'error_id') + error_count = fields.Integer("Error count", store=True, compute='_compute_count') + previous_error_id = fields.Many2one('runbot.build.error', string="Already seen error") - content = fields.Text('Error message', required=True) - cleaned_content = fields.Text('Cleaned error message') - summary = fields.Char('Content summary', compute='_compute_summary', store=False) - module_name = fields.Char('Module name') # name in ir_logging - file_path = fields.Char('File Path') # path in ir logging - function = fields.Char('Function name') # func name in ir logging - fingerprint = fields.Char('Error fingerprint', index=True) - random = fields.Boolean('underterministic error', tracking=True) responsible = fields.Many2one('res.users', 'Assigned fixer', tracking=True) customer = fields.Many2one('res.users', 'Customer', tracking=True) team_id = fields.Many2one('runbot.team', 'Assigned team', tracking=True) @@ -56,27 +88,58 @@ class BuildError(models.Model): fixing_pr_id = fields.Many2one('runbot.branch', 'Fixing PR', tracking=True, domain=[('is_pr', '=', True)]) fixing_pr_alive = fields.Boolean('Fixing PR alive', related='fixing_pr_id.alive') fixing_pr_url = fields.Char('Fixing PR url', related='fixing_pr_id.branch_url') - build_error_link_ids = fields.One2many('runbot.build.error.link', 'build_error_id') - children_build_error_link_ids = fields.One2many('runbot.build.error.link', compute='_compute_children_build_error_link_ids') - build_ids = fields.Many2many('runbot.build', compute= '_compute_build_ids') - 
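# Illustration of the seen mixin above (values assumed): with two links logged on
# 2024-01-01 (build 10) and 2024-03-05 (build 42), first_seen_date/first_seen_build_id
# follow the oldest link, last_seen_* follow the newest one, and build_count is 2
# (distinct builds, not links).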
bundle_ids = fields.One2many('runbot.bundle', compute='_compute_bundle_ids') - version_ids = fields.One2many('runbot.version', compute='_compute_version_ids', string='Versions', search='_search_version') - trigger_ids = fields.Many2many('runbot.trigger', compute='_compute_trigger_ids', string='Triggers', search='_search_trigger_ids') - active = fields.Boolean('Active (not fixed)', default=True, tracking=True) - tag_ids = fields.Many2many('runbot.build.error.tag', string='Tags') - build_count = fields.Integer(compute='_compute_build_counts', string='Nb seen', store=True) - parent_id = fields.Many2one('runbot.build.error', 'Linked to', index=True) - child_ids = fields.One2many('runbot.build.error', 'parent_id', string='Child Errors', context={'active_test': False}) - children_build_ids = fields.Many2many('runbot.build', compute='_compute_children_build_ids', string='Children builds') - error_history_ids = fields.Many2many('runbot.build.error', compute='_compute_error_history_ids', string='Old errors', context={'active_test': False}) - first_seen_build_id = fields.Many2one('runbot.build', compute='_compute_first_seen_build_id', string='First Seen build') - first_seen_date = fields.Datetime(string='First Seen Date', compute='_compute_seen_date', store=True) - last_seen_build_id = fields.Many2one('runbot.build', compute='_compute_last_seen_build_id', string='Last Seen build', store=True) - last_seen_date = fields.Datetime(string='Last Seen Date', compute='_compute_seen_date', store=True) + test_tags = fields.Char(string='Test tags', help="Comma separated list of test_tags to use to reproduce/remove this error", tracking=True) tags_min_version_id = fields.Many2one('runbot.version', 'Tags Min version', help="Minimal version where the test tags will be applied.") tags_max_version_id = fields.Many2one('runbot.version', 'Tags Max version', help="Maximal version where the test tags will be applied.") + # Build error related data + build_error_link_ids = fields.Many2many('runbot.build.error.link', compute=_compute_related_error_content_ids('build_error_link_ids')) + unique_build_error_link_ids = fields.Many2many('runbot.build.error.link', compute='_compute_unique_build_error_link_ids') + build_ids = fields.Many2many('runbot.build', compute=_compute_related_error_content_ids('build_ids')) + bundle_ids = fields.Many2many('runbot.bundle', compute=_compute_related_error_content_ids('bundle_ids')) + version_ids = fields.Many2many('runbot.version', string='Versions', compute=_compute_related_error_content_ids('version_ids')) + trigger_ids = fields.Many2many('runbot.trigger', string='Triggers', compute=_compute_related_error_content_ids('trigger_ids')) + tag_ids = fields.Many2many('runbot.build.error.tag', string='Tags', compute=_compute_related_error_content_ids('tag_ids')) + + random = fields.Boolean('Random', compute="_compute_random", store=True) + + @api.depends('build_error_link_ids') + def _compute_unique_build_error_link_ids(self): + for record in self: + seen = set() + id_list = [] + for error_link in record.build_error_link_ids: + if error_link.build_id.id not in seen: + seen.add(error_link.build_id.id) + id_list.append(error_link.id) + record.unique_build_error_link_ids = record.env['runbot.build.error.link'].browse(id_list) + + @api.depends('name', 'error_content_ids') + def _compute_description(self): + for record in self: + record.description = record.name + if record.error_content_ids: + record.description = record.error_content_ids[0].content + + def _compute_content(self): + for record in 
self: + record.content = '\n'.join(record.error_content_ids.mapped('content')) + + def _search_content(self, operator, value): + return [('error_content_ids', 'any', [('content', operator, value)])] + + @api.depends('error_content_ids') + def _compute_count(self): + for record in self: + record.error_count = len(record.error_content_ids) + + @api.depends('error_content_ids') + def _compute_random(self): + for record in self: + record.random = any(error.random for error in record.error_content_ids) + + @api.constrains('test_tags') def _check_test_tags(self): for build_error in self: @@ -85,24 +148,16 @@ class BuildError(models.Model): @api.onchange('test_tags') def _onchange_test_tags(self): - self.tags_min_version_id = min(self.version_ids, key=lambda rec: rec.number) - self.tags_max_version_id = max(self.version_ids, key=lambda rec: rec.number) + if self.test_tags and self.version_ids: + self.tags_min_version_id = min(self.version_ids, key=lambda rec: rec.number) + self.tags_max_version_id = max(self.version_ids, key=lambda rec: rec.number) @api.onchange('customer') def _onchange_customer(self): if not self.responsible: self.responsible = self.customer - @api.model_create_multi def create(self, vals_list): - cleaners = self.env['runbot.error.regex'].search([('re_type', '=', 'cleaning')]) - for vals in vals_list: - content = vals.get('content') - cleaned_content = cleaners._r_sub(content) - vals.update({ - 'cleaned_content': cleaned_content, - 'fingerprint': self._digest(cleaned_content) - }) records = super().create(vals_list) records.action_assign() return records @@ -110,170 +165,35 @@ class BuildError(models.Model): def write(self, vals): if 'active' in vals: for build_error in self: - (build_error.child_ids - self).write({'active': vals['active']}) if not (self.env.su or self.user_has_groups('runbot.group_runbot_admin')): if build_error.test_tags: raise UserError("This error as a test-tag and can only be (de)activated by admin") if not vals['active'] and build_error.last_seen_date + relativedelta(days=1) > fields.Datetime.now(): raise UserError("This error broke less than one day ago can only be deactivated by admin") - if 'cleaned_content' in vals: - vals.update({'fingerprint': self._digest(vals['cleaned_content'])}) - result = super(BuildError, self).write(vals) - if vals.get('parent_id'): - for build_error in self: - parent = build_error.parent_id - if build_error.test_tags: - if parent.test_tags and not self.env.su: - raise UserError(f"Cannot parent an error with test tags: {build_error.test_tags}") - elif not parent.test_tags: - parent.sudo().test_tags = build_error.test_tags - build_error.sudo().test_tags = False - if build_error.responsible: - if parent.responsible and parent.responsible != build_error.responsible and not self.env.su: - raise UserError(f"Error {parent.id} as already a responsible ({parent.responsible}) cannot assign {build_error.responsible}") - else: - parent.responsible = build_error.responsible - build_error.responsible = False - if build_error.team_id: - if not parent.team_id: - parent.team_id = build_error.team_id - build_error.team_id = False - return result + return super().write(vals) - @api.depends('build_error_link_ids') - def _compute_build_ids(self): - for record in self: - record.build_ids = record.build_error_link_ids.mapped('build_id') - - @api.depends('build_error_link_ids') - def _compute_children_build_error_link_ids(self): - for record in self: - record.children_build_error_link_ids = record.build_error_link_ids | 
record.child_ids.build_error_link_ids - - @api.depends('build_ids', 'child_ids.build_ids') - def _compute_build_counts(self): - for build_error in self: - build_error.build_count = len(build_error.build_ids | build_error.mapped('child_ids.build_ids')) - - @api.depends('build_ids') - def _compute_bundle_ids(self): - for build_error in self: - top_parent_builds = build_error.build_ids.mapped(lambda rec: rec and rec.top_parent) - build_error.bundle_ids = top_parent_builds.mapped('slot_ids').mapped('batch_id.bundle_id') - - @api.depends('children_build_ids') - def _compute_version_ids(self): - for build_error in self: - build_error.version_ids = build_error.children_build_ids.version_id - - @api.depends('children_build_ids') - def _compute_trigger_ids(self): - for build_error in self: - build_error.trigger_ids = build_error.children_build_ids.trigger_id - - @api.depends('content') - def _compute_summary(self): - for build_error in self: - build_error.summary = build_error.content[:80] - - - @api.depends('build_ids', 'child_ids.build_ids') - def _compute_children_build_ids(self): - for build_error in self: - all_builds = build_error.build_ids | build_error.mapped('child_ids.build_ids') - build_error.children_build_ids = all_builds.sorted(key=lambda rec: rec.id, reverse=True) - - @api.depends('children_build_ids') - def _compute_last_seen_build_id(self): - for build_error in self: - build_error.last_seen_build_id = build_error.children_build_ids and build_error.children_build_ids[0] or False - - @api.depends('build_error_link_ids', 'child_ids.build_error_link_ids') - def _compute_seen_date(self): - for build_error in self: - error_dates = (build_error.build_error_link_ids | build_error.child_ids.build_error_link_ids).mapped('log_date') - build_error.first_seen_date = error_dates and min(error_dates) - build_error.last_seen_date = error_dates and max(error_dates) - - @api.depends('children_build_ids') - def _compute_first_seen_build_id(self): - for build_error in self: - build_error.first_seen_build_id = build_error.children_build_ids and build_error.children_build_ids[-1] or False - - @api.depends('fingerprint', 'child_ids.fingerprint') - def _compute_error_history_ids(self): - for error in self: - fingerprints = [error.fingerprint] + [rec.fingerprint for rec in error.child_ids] - error.error_history_ids = self.search([('fingerprint', 'in', fingerprints), ('active', '=', False), ('id', '!=', error.id or False)]) - - @api.model - def _digest(self, s): - """ - return a hash 256 digest of the string s - """ - return hashlib.sha256(s.encode()).hexdigest() - - @api.model - def _parse_logs(self, ir_logs): - if not ir_logs: - return - regexes = self.env['runbot.error.regex'].search([]) - search_regs = regexes.filtered(lambda r: r.re_type == 'filter') - cleaning_regs = regexes.filtered(lambda r: r.re_type == 'cleaning') - - hash_dict = defaultdict(self.env['ir.logging'].browse) - for log in ir_logs: - if search_regs._r_search(log.message): - continue - fingerprint = self._digest(cleaning_regs._r_sub(log.message)) - hash_dict[fingerprint] |= log - - build_errors = self.env['runbot.build.error'] - # add build ids to already detected errors - existing_errors = self.env['runbot.build.error'].search([('fingerprint', 'in', list(hash_dict.keys())), ('active', '=', True)]) - existing_fingerprints = existing_errors.mapped('fingerprint') - build_errors |= existing_errors - for build_error in existing_errors: - logs = hash_dict[build_error.fingerprint] - # update filepath if it changed. 
This is optionnal and mainly there in case we adapt the OdooRunner log - if logs[0].path != build_error.file_path: - build_error.file_path = logs[0].path - build_error.function = logs[0].func - - # create an error for the remaining entries - for fingerprint, logs in hash_dict.items(): - if fingerprint in existing_fingerprints: - continue - new_build_error = self.env['runbot.build.error'].create({ - 'content': logs[0].message, - 'module_name': logs[0].name.removeprefix('odoo.').removeprefix('addons.'), - 'file_path': logs[0].path, - 'function': logs[0].func, - }) - build_errors |= new_build_error - existing_fingerprints.append(fingerprint) - - for build_error in build_errors: - logs = hash_dict[build_error.fingerprint] - for rec in logs: - if rec.build_id not in build_error.build_error_link_ids.build_id: - self.env['runbot.build.error.link'].create({ - 'build_id': rec.build_id.id, - 'build_error_id': build_error.id, - 'log_date': rec.create_date - }) - - if build_errors: - window_action = { - "type": "ir.actions.act_window", - "res_model": "runbot.build.error", - "views": [[False, "tree"]], - "domain": [('id', 'in', build_errors.ids)] - } - if len(build_errors) == 1: - window_action["views"] = [[False, "form"]] - window_action["res_id"] = build_errors.id - return window_action + def _merge(self, others): + self.ensure_one + error = self + for previous_error in others: + # todo, check that all relevant fields are checked and transfered/logged + if previous_error.test_tags and error.test_tags != previous_error.test_tags: + if previous_error.test_tags and not self.env.su: + raise UserError(f"Cannot merge an error with test tags: {previous_error.test_tags}") + elif not error.test_tags: + error.sudo().test_tags = previous_error.test_tags + previous_error.sudo().test_tags = False + if previous_error.responsible: + if error.responsible and error.responsible != previous_error.responsible and not self.env.su: + raise UserError(f"error {error.id} as already a responsible ({error.responsible}) cannot assign {previous_error.responsible}") + else: + error.responsible = previous_error.responsible + if previous_error.team_id: + if not error.team_id: + error.team_id = previous_error.team_id + previous_error.error_content_ids.write({'error_id': self}) + if not previous_error.test_tags: + previous_error.active = False @api.model def _test_tags_list(self, build_id=False): @@ -293,6 +213,224 @@ class BuildError(models.Model): def _disabling_tags(self, build_id=False): return ['-%s' % tag for tag in self._test_tags_list(build_id)] + def _get_form_url(self): + self.ensure_one() + return url_join(self.get_base_url(), f'/web#id={self.id}&model=runbot.build.error&view_type=form') + + def _get_form_link(self): + self.ensure_one() + return Markup('%s') % (self._get_form_url(), self.id) + + def action_view_errors(self): + return { + 'type': 'ir.actions.act_window', + 'views': [(False, 'tree'), (False, 'form')], + 'res_model': 'runbot.build.error.content', + 'domain': [('error_id', '=', self.id)], + 'context': {'active_test': False}, + 'target': 'current', + } + + def action_assign(self): + teams = None + repos = None + for record in self: + if not record.responsible and not record.team_id: + for error_content in record.error_content_ids: + if error_content.file_path: + if teams is None: + teams = self.env['runbot.team'].search(['|', ('path_glob', '!=', False), ('module_ownership_ids', '!=', False)]) + repos = self.env['runbot.repo'].search([]) + team = teams._get_team(error_content.file_path, repos) + if team: + 
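# Note (illustrative, glob value assumed): teams are resolved from the error content's
# file_path via runbot.team._get_team(), so a team whose path_glob (fnmatch wildcards,
# e.g. '*account*') or module ownership covers that path is the one assigned below.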
record.team_id = team + break + + @api.model + def _parse_logs(self, ir_logs): + if not ir_logs: + return + regexes = self.env['runbot.error.regex'].search([]) + search_regs = regexes.filtered(lambda r: r.re_type == 'filter') + cleaning_regs = regexes.filtered(lambda r: r.re_type == 'cleaning') + + hash_dict = defaultdict(self.env['ir.logging'].browse) + for log in ir_logs: + if search_regs._r_search(log.message): + continue + fingerprint = self.env['runbot.build.error.content']._digest(cleaning_regs._r_sub(log.message)) + hash_dict[fingerprint] |= log + + build_error_contents = self.env['runbot.build.error.content'] + # add build ids to already detected errors + existing_errors_contents = self.env['runbot.build.error.content'].search([('fingerprint', 'in', list(hash_dict.keys())), ('error_id.active', '=', True)]) + existing_fingerprints = existing_errors_contents.mapped('fingerprint') + build_error_contents |= existing_errors_contents + # for build_error_content in existing_errors_contents: + # logs = hash_dict[build_error_content.fingerprint] + # # update filepath if it changed. This is optionnal and mainly there in case we adapt the OdooRunner log + # if logs[0].path != build_error_content.file_path: + # build_error_content.file_path = logs[0].path + # build_error_content.function = logs[0].func + + # create an error for the remaining entries + for fingerprint, logs in hash_dict.items(): + if fingerprint in existing_fingerprints: + continue + new_build_error_content = self.env['runbot.build.error.content'].create({ + 'content': logs[0].message, + 'module_name': logs[0].name.removeprefix('odoo.').removeprefix('addons.'), + 'file_path': logs[0].path, + 'function': logs[0].func, + }) + build_error_contents |= new_build_error_content + existing_fingerprints.append(fingerprint) + + for build_error_content in build_error_contents: + logs = hash_dict[build_error_content.fingerprint] + for rec in logs: + if rec.build_id not in build_error_content.build_ids: + self.env['runbot.build.error.link'].create({ + 'build_id': rec.build_id.id, + 'error_content_id': build_error_content.id, + 'log_date': rec.create_date, + }) + + if build_error_contents: + window_action = { + "type": "ir.actions.act_window", + "res_model": "runbot.build.error", + "views": [[False, "tree"]], + "domain": [('id', 'in', build_error_contents.ids)] + } + if len(build_error_contents) == 1: + window_action["views"] = [[False, "form"]] + window_action["res_id"] = build_error_contents.id + return window_action + + def action_link_errors(self): + if len(self) < 2: + return + # sort self so that the first one is the one that has test tags or responsible, or the oldest. 
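# Illustration of the ascending tuple sort below (values assumed): an error with
# test_tags and a responsible sorts as (False, False, 2, 7) and therefore comes
# before a bare error sorting as (True, True, 1, 42), so it becomes the base
# error that absorbs the others through _merge().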
+ self_sorted = self.sorted(lambda error: (not error.test_tags, not error.responsible, error.error_count, error.id)) + base_error = self_sorted[0] + base_error._merge(self_sorted - base_error) + + +class BuildErrorContent(models.Model): + + _name = 'runbot.build.error.content' + _description = "Build error log" + + _inherit = ('mail.thread', 'mail.activity.mixin', 'runbot.build.error.seen.mixin') + _rec_name = "id" + + error_id = fields.Many2one('runbot.build.error', 'Linked to', index=True, required=True) + content = fields.Text('Error message', required=True) + cleaned_content = fields.Text('Cleaned error message') + summary = fields.Char('Content summary', compute='_compute_summary', store=False) + module_name = fields.Char('Module name') # name in ir_logging + file_path = fields.Char('File Path') # path in ir logging + function = fields.Char('Function name') # func name in ir logging + fingerprint = fields.Char('Error fingerprint', index=True) + random = fields.Boolean('underterministic error', tracking=True) + build_error_link_ids = fields.One2many('runbot.build.error.link', 'error_content_id') + + build_ids = fields.Many2many('runbot.build', compute='_compute_build_ids') + bundle_ids = fields.One2many('runbot.bundle', compute='_compute_bundle_ids') + version_ids = fields.One2many('runbot.version', compute='_compute_version_ids', string='Versions', search='_search_version') + trigger_ids = fields.Many2many('runbot.trigger', compute='_compute_trigger_ids', string='Triggers', search='_search_trigger_ids') + tag_ids = fields.Many2many('runbot.build.error.tag', string='Tags') + + responsible = fields.Many2one(related='error_id.responsible') + customer = fields.Many2one(related='error_id.customer') + team_id = fields.Many2one(related='error_id.team_id') + fixing_commit = fields.Char(related='error_id.fixing_commit') + fixing_pr_id = fields.Many2one(related='error_id.fixing_pr_id') + fixing_pr_alive = fields.Boolean(related='error_id.fixing_pr_alive') + fixing_pr_url = fields.Char(related='error_id.fixing_pr_url') + test_tags = fields.Char(related='error_id.test_tags') + tags_min_version_id = fields.Many2one(related='error_id.tags_min_version_id') + tags_max_version_id = fields.Many2one(related='error_id.tags_max_version_id') + + def _set_error_history(self): + for error_content in self: + if not error_content.error_id.previous_error_id: + previous_error_content = error_content.search([ + ('fingerprint', '=', error_content.fingerprint), + ('error_id.active', '=', False), + ('id', '!=', error_content.id or False), + ]) + if previous_error_content and previous_error_content != error_content.error_id: + error_content.error_id.message_post(body=f"An historical error was found for error {error_content.id}: {previous_error_content.id}") + error_content.error_id.previous_error_id = previous_error_content.error_id + + @api.model_create_multi + def create(self, vals_list): + cleaners = self.env['runbot.error.regex'].search([('re_type', '=', 'cleaning')]) + for vals in vals_list: + if not vals.get('error_id'): + # TODO, try to find an existing one that could match, will be done in another pr + name = vals.get('content', '').split('\n')[0][:1000] + error = self.env['runbot.build.error'].create({ + 'name': name, + }) + vals['error_id'] = error.id + content = vals.get('content') + cleaned_content = cleaners._r_sub(content) + vals.update({ + 'cleaned_content': cleaned_content, + 'fingerprint': self._digest(cleaned_content) + }) + records = super().create(vals_list) + records._set_error_history() + 
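# Illustration (assumed call, not from the patch): when no error_id is given, as in
# _parse_logs, the create() above builds the parent error itself, e.g.
#   env['runbot.build.error.content'].create({'content': 'Tb ...\nAssertionError: x'})
# creates a runbot.build.error named after the first content line and attaches the
# content to it.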
records.error_id.action_assign() + return records + + def write(self, vals): + if 'cleaned_content' in vals: + vals.update({'fingerprint': self._digest(vals['cleaned_content'])}) + initial_errors = self.mapped('error_id') + result = super().write(vals) + if vals.get('error_id'): + for build_error, previous_error in zip(self, initial_errors): + if not previous_error.error_content_ids: + build_error.error_id._merge(previous_error) + return result + + @api.depends('build_error_link_ids') + def _compute_build_ids(self): + for record in self: + record.build_ids = record.build_error_link_ids.mapped('build_id').sorted('id') + + @api.depends('build_ids') + def _compute_bundle_ids(self): + for build_error in self: + top_parent_builds = build_error.build_ids.mapped(lambda rec: rec and rec.top_parent) + build_error.bundle_ids = top_parent_builds.mapped('slot_ids').mapped('batch_id.bundle_id') + + @api.depends('build_ids') + def _compute_version_ids(self): + for build_error in self: + build_error.version_ids = build_error.build_ids.version_id + + @api.depends('build_ids') + def _compute_trigger_ids(self): + for build_error in self: + build_error.trigger_ids = build_error.build_ids.trigger_id + + @api.depends('content') + def _compute_summary(self): + for build_error in self: + build_error.summary = build_error.content[:80] + + @api.model + def _digest(self, s): + """ + return a hash 256 digest of the string s + """ + return hashlib.sha256(s.encode()).hexdigest() + def _search_version(self, operator, value): exclude_domain = [] if operator == '=': @@ -303,93 +441,63 @@ class BuildError(models.Model): def _search_trigger_ids(self, operator, value): return [('build_error_link_ids.trigger_id', operator, value)] - def _get_form_url(self): - self.ensure_one() - return url_join(self.get_base_url(), f'/web#id={self.id}&model=runbot.build.error&view_type=form') - - def _get_form_link(self): - self.ensure_one() - return Markup(f'%s') % (self._get_form_url(), self.id) - def _merge(self): if len(self) < 2: return _logger.debug('Merging errors %s', self) - base_error = self[0] - base_linked = self[0].parent_id or self[0] - for error in self[1:]: - assert base_error.fingerprint == error.fingerprint, f'Errors {base_error.id} and {error.id} have a different fingerprint' - if error.test_tags and not base_linked.test_tags: - base_linked.test_tags = error.test_tags - if not base_linked.active and error.active: - base_linked.active = True - base_error.message_post(body=Markup('⚠ test-tags inherited from error %s') % error._get_form_link()) - elif base_linked.test_tags and error.test_tags and base_linked.test_tags != error.test_tags: - base_error.message_post(body=Markup('⚠ trying to merge errors with different test-tags from %s tag: "%s"') % (error._get_form_link(), error.test_tags)) - error.message_post(body=Markup('⚠ trying to merge errors with different test-tags from %s tag: "%s"') % (base_error._get_form_link(), base_error.test_tags)) - continue - - for build_error_link in error.build_error_link_ids: - if build_error_link.build_id not in base_error.build_error_link_ids.build_id: - build_error_link.build_error_id = base_error + base_error_content = self[0] + base_error = base_error_content.error_id + errors = self.env['runbot.build.error'] + for error_content in self[1:]: + assert base_error_content.fingerprint == error_content.fingerprint, f'Errors {base_error_content.id} and {error_content.id} have a different fingerprint' + for build_error_link in error_content.build_error_link_ids: + if build_error_link.build_id 
not in base_error_content.build_error_link_ids.build_id: + build_error_link.error_content_id = base_error_content else: # as the relation already exists and was not transferred we can remove the old one build_error_link.unlink() - - if error.responsible and not base_linked.responsible: - base_error.responsible = error.responsible - elif base_linked.responsible and error.responsible and base_linked.responsible != error.responsible: - base_linked.message_post(body=Markup('⚠ responsible in merged error %s was "%s" and different from this one') % (error._get_form_link(), error.responsible.name)) - - if error.team_id and not base_error.team_id: - base_error.team_id = error.team_id - - base_error.message_post(body=Markup('Error %s was merged into this one') % error._get_form_link()) - error.message_post(body=Markup('Error was merged into %s') % base_linked._get_form_link()) - error.child_ids.parent_id = base_error - error.active = False + if error_content.error_id != base_error_content.error_id: + base_error.message_post(body=Markup('Error content coming from %s was merged into this one') % error_content.error_id._get_form_link()) + if not base_error.active and error_content.error_id.active: + base_error.active = True + errors |= error_content.error_id + error_content.unlink() + for error in errors: + error.message_post(body=Markup('Some error contents from this error where merged into %s') % base_error._get_form_link()) + if not error.error_content_ids: + base_error._merge(error) #################### # Actions #################### - def action_link_errors(self): + def action_link_errors_contents(self): """ Link errors with the first one of the recordset choosing parent in error with responsible, random bug and finally fisrt seen """ if len(self) < 2: return - self = self.with_context(active_test=False) - build_errors = self.search([('id', 'in', self.ids)], order='responsible asc, random desc, id asc') - build_errors[1:].write({'parent_id': build_errors[0].id}) + # sort self so that the first one is the one that has test tags or responsible, or the oldest. 
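# Rough usage sketch (record ids assumed):
#   env['runbot.build.error.content'].browse([7, 8]).action_link_errors_contents()
# picks the content whose parent error has test_tags or a responsible (else the
# oldest) and merges the other contents' parent errors into that base error via
# runbot.build.error._merge().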
+ self_sorted = self.sorted(lambda ec: (not ec.error_id.test_tags, not ec.error_id.responsible, ec.error_id.error_count, ec.id)) + base_error = self_sorted[0].error_id + base_error._merge(self_sorted.error_id - base_error) def action_clean_content(self): - _logger.info('Cleaning %s build errors', len(self)) + _logger.info('Cleaning %s build errorscontent', len(self)) cleaning_regs = self.env['runbot.error.regex'].search([('re_type', '=', 'cleaning')]) changed_fingerprints = set() - for build_error in self: - fingerprint_before = build_error.fingerprint - build_error.cleaned_content = cleaning_regs._r_sub(build_error.content) - if fingerprint_before != build_error.fingerprint: - changed_fingerprints.add(build_error.fingerprint) + for build_error_content in self: + fingerprint_before = build_error_content.fingerprint + build_error_content.cleaned_content = cleaning_regs._r_sub(build_error_content.content) + if fingerprint_before != build_error_content.fingerprint: + changed_fingerprints.add(build_error_content.fingerprint) # merge identical errors - errors_by_fingerprint = self.env['runbot.build.error'].search([('fingerprint', 'in', list(changed_fingerprints))]) + errors_content_by_fingerprint = self.env['runbot.build.error.content'].search([('fingerprint', 'in', list(changed_fingerprints))]) for fingerprint in changed_fingerprints: - errors_to_merge = errors_by_fingerprint.filtered(lambda r: r.fingerprint == fingerprint) - errors_to_merge._merge() - - def action_assign(self): - if not any((not record.responsible and not record.team_id and record.file_path and not record.parent_id) for record in self): - return - teams = self.env['runbot.team'].search(['|', ('path_glob', '!=', False), ('module_ownership_ids', '!=', False)]) - repos = self.env['runbot.repo'].search([]) - for record in self: - if not record.responsible and not record.team_id and record.file_path and not record.parent_id: - team = teams._get_team(record.file_path, repos) - if team: - record.team_id = team + errors_content_to_merge = errors_content_by_fingerprint.filtered(lambda r: r.fingerprint == fingerprint) + errors_content_to_merge._merge() class BuildErrorTag(models.Model): @@ -398,7 +506,7 @@ class BuildErrorTag(models.Model): _description = "Build error tag" name = fields.Char('Tag') - error_ids = fields.Many2many('runbot.build.error', string='Errors') + error_content_ids = fields.Many2many('runbot.build.error.content', string='Errors') class ErrorRegex(models.Model): diff --git a/runbot/models/commit.py b/runbot/models/commit.py index 95886d7d..29072028 100644 --- a/runbot/models/commit.py +++ b/runbot/models/commit.py @@ -113,7 +113,6 @@ class Commit(models.Model): _logger.info('git export: exporting to %s (already exists)', export_path) return export_path - _logger.info('git export: exporting to %s (new)', export_path) os.makedirs(export_path) diff --git a/runbot/models/ir_logging.py b/runbot/models/ir_logging.py index 5063dcc9..73ac447d 100644 --- a/runbot/models/ir_logging.py +++ b/runbot/models/ir_logging.py @@ -22,7 +22,7 @@ class IrLogging(models.Model): build_id = fields.Many2one('runbot.build', 'Build', index=True, ondelete='cascade') active_step_id = fields.Many2one('runbot.build.config.step', 'Active step', index=True) type = fields.Selection(selection_add=TYPES, string='Type', required=True, index=True, ondelete={t[0]: 'cascade' for t in TYPES}) - error_id = fields.Many2one('runbot.build.error', compute='_compute_known_error') # remember to never store this field + error_content_id = 
fields.Many2one('runbot.build.error.content', compute='_compute_known_error') # remember to never store this field dbname = fields.Char(string='Database Name', index=False) @api.model_create_multi @@ -57,12 +57,12 @@ class IrLogging(models.Model): cleaning_regexes = self.env['runbot.error.regex'].search([('re_type', '=', 'cleaning')]) fingerprints = defaultdict(list) for ir_logging in self: - ir_logging.error_id = False + ir_logging.error_content_id = False if ir_logging.level in ('ERROR', 'CRITICAL', 'WARNING') and ir_logging.type == 'server': - fingerprints[self.env['runbot.build.error']._digest(cleaning_regexes._r_sub(ir_logging.message))].append(ir_logging) - for build_error in self.env['runbot.build.error'].search([('fingerprint', 'in', list(fingerprints.keys()))], order='active asc'): - for ir_logging in fingerprints[build_error.fingerprint]: - ir_logging.error_id = build_error.id + fingerprints[self.env['runbot.build.error.content']._digest(cleaning_regexes._r_sub(ir_logging.message))].append(ir_logging) + for build_error_content in self.env['runbot.build.error.content'].search([('fingerprint', 'in', list(fingerprints.keys()))]).sorted(lambda ec: not ec.error_id.active): + for ir_logging in fingerprints[build_error_content.fingerprint]: + ir_logging.error_content_id = build_error_content.id def _prepare_create_values(self, vals_list): # keep the given create date @@ -160,9 +160,8 @@ class RunbotErrorLog(models.Model): return [] def _parse_logs(self): - BuildError = self.env['runbot.build.error'] ir_logs = self.env['ir.logging'].browse(self.ids) - return BuildError._parse_logs(ir_logs) + return self.env['runbot.build.error']._parse_logs(ir_logs) def init(self): """ Create an SQL view for ir.logging """ diff --git a/runbot/models/team.py b/runbot/models/team.py index 1216b8ef..5661108f 100644 --- a/runbot/models/team.py +++ b/runbot/models/team.py @@ -27,7 +27,7 @@ class RunbotTeam(models.Model): organisation = fields.Char('organisation', related="project_id.organisation") user_ids = fields.Many2many('res.users', string='Team Members', domain=[('share', '=', False)]) dashboard_id = fields.Many2one('runbot.dashboard', string='Dashboard') - build_error_ids = fields.One2many('runbot.build.error', 'team_id', string='Team Errors', domain=[('parent_id', '=', False)]) + assignment_ids = fields.One2many('runbot.build.error', 'team_id', string='Team Errors') path_glob = fields.Char( 'Module Wildcards', help='Comma separated list of `fnmatch` wildcards used to assign errors automaticaly\n' diff --git a/runbot/security/ir.model.access.csv b/runbot/security/ir.model.access.csv index d9df2e7b..3be65f90 100644 --- a/runbot/security/ir.model.access.csv +++ b/runbot/security/ir.model.access.csv @@ -21,6 +21,8 @@ access_runbot_config_step_upgrade_db_manager,runbot_config_step_upgrade_db_manag access_runbot_build_error_user,runbot_build_error_user,runbot.model_runbot_build_error,group_user,1,0,0,0 access_runbot_build_error_admin,runbot_build_error_admin,runbot.model_runbot_build_error,runbot.group_runbot_admin,1,1,1,1 +access_runbot_build_error_content_user,runbot_build_error_content_user,runbot.model_runbot_build_error_content,group_user,1,0,0,0 +access_runbot_build_error_content_admin,runbot_build_error_content_admin,runbot.model_runbot_build_error_content,runbot.group_runbot_admin,1,1,1,1 access_runbot_build_error_manager,runbot_build_error_manager,runbot.model_runbot_build_error,runbot.group_runbot_error_manager,1,1,1,1 
access_runbot_build_error_link_user,runbot_runbot_build_error_link_user,runbot.model_runbot_build_error_link,group_user,1,0,0,0 access_runbot_build_error_link_admin,runbot_runbot_build_error_link_admin,runbot.model_runbot_build_error_link,runbot.group_runbot_admin,1,1,1,1 diff --git a/runbot/static/src/js/fields/fields.js b/runbot/static/src/js/fields/fields.js index 169f082e..5fa95487 100644 --- a/runbot/static/src/js/fields/fields.js +++ b/runbot/static/src/js/fields/fields.js @@ -7,10 +7,12 @@ import { Many2OneField } from "@web/views/fields/many2one/many2one_field"; import { _lt } from "@web/core/l10n/translation"; import { registry } from "@web/core/registry"; import { useDynamicPlaceholder } from "@web/views/fields/dynamic_placeholder_hook"; +import { standardFieldProps } from "@web/views/fields/standard_field_props"; import { useInputField } from "@web/views/fields/input_field_hook"; import { useRef, xml, Component } from "@odoo/owl"; import { useAutoresize } from "@web/core/utils/autoresize"; +import { getFormattedValue } from "@web/views/utils"; function stringify(obj) { @@ -62,16 +64,32 @@ registry.category("fields").add("runbotjsonb", { export class FrontendUrl extends Component { static template = xml` -
-
-
-
`; +
+ `; static components = { Many2OneField }; + static props = { + ...Many2OneField.props, + linkField: { type: String, optional: true }, + }; + + get baseProps() { + console.log(omit(this.props, 'linkField')) + return omit(this.props, 'linkField', 'context') + } + + get displayValue() { + return this.props.record.data[this.props.name] ? getFormattedValue(this.props.record, this.props.name, {}) : '' + } + get route() { - const model = this.props.relation || this.props.record.fields[this.props.name].relation; - const id = this.props.record.data[this.props.name][0]; + return this._route(this.props.linkField || this.props.name) + } + + _route(fieldName) { + const model = this.props.record.fields[fieldName].relation || "runbot.unknown"; + const id = this.props.record.data[fieldName][0]; if (model.startsWith('runbot.') ) { return '/runbot/' + model.split('.')[1] + '/' + id; } else { @@ -83,6 +101,11 @@ export class FrontendUrl extends Component { registry.category("fields").add("frontend_url", { supportedTypes: ["many2one"], component: FrontendUrl, + extractProps({ attrs, options }, dynamicInfo) { + return { + linkField: options.link_field, + }; + }, }); diff --git a/runbot/templates/build.xml b/runbot/templates/build.xml index aa70d223..edec42ca 100644 --- a/runbot/templates/build.xml +++ b/runbot/templates/build.xml @@ -333,7 +333,7 @@ - + @@ -342,21 +342,18 @@ - - - - - - - - - + + + - - This error is already . - - + + This error is already . + + + () diff --git a/runbot/templates/build_error.xml b/runbot/templates/build_error.xml index dac7f459..070d075e 100644 --- a/runbot/templates/build_error.xml +++ b/runbot/templates/build_error.xml @@ -7,8 +7,7 @@
Last seen date
-
Module
-
Summary
+
Summary
Triggers
Assigned to
&nbsp;
@@ -20,10 +19,9 @@
-
-
+
@@ -125,14 +123,14 @@
-

Team assigned Errors

+

Team assigned Errors

&nbsp;
- + team_errors
diff --git a/runbot/tests/test_build_error.py b/runbot/tests/test_build_error.py
index d199e041..6309cdd4 100644
--- a/runbot/tests/test_build_error.py
+++ b/runbot/tests/test_build_error.py
@@ -55,8 +55,9 @@ class TestBuildError(RunbotCase):
     def setUp(self):
         super(TestBuildError, self).setUp()
         self.BuildError = self.env['runbot.build.error']
+        self.BuildErrorContent = self.env['runbot.build.error.content']
         self.BuildErrorLink = self.env['runbot.build.error.link']
-        self.BuildErrorTeam = self.env['runbot.team']
+        self.RunbotTeam = self.env['runbot.team']
         self.ErrorRegex = self.env['runbot.error.regex']
         self.IrLog = self.env['ir.logging']
 
@@ -67,37 +68,132 @@ class TestBuildError(RunbotCase):
             're_type': 'cleaning',
         })
 
-        error_x = self.BuildError.create({
+        error_content = self.BuildErrorContent.create({
             'content': 'foo bar 242',
         })
 
         expected = 'foo bar %'
         expected_hash = hashlib.sha256(expected.encode()).hexdigest()
 
-        self.assertEqual(error_x.cleaned_content, expected)
-        self.assertEqual(error_x.fingerprint, expected_hash)
+        self.assertEqual(error_content.cleaned_content, expected)
+        self.assertEqual(error_content.fingerprint, expected_hash)
 
         # Let's ensure that the fingerprint changes if we clean with an additional regex
         self.ErrorRegex.create({
             'regex': 'bar',
             're_type': 'cleaning',
         })
 
-        error_x.action_clean_content()
+        error_content.action_clean_content()
 
         expected = 'foo % %'
         expected_hash = hashlib.sha256(expected.encode()).hexdigest()
 
-        self.assertEqual(error_x.cleaned_content, expected)
-        self.assertEqual(error_x.fingerprint, expected_hash)
+        self.assertEqual(error_content.cleaned_content, expected)
+        self.assertEqual(error_content.fingerprint, expected_hash)
 
-    def test_merge(self):
+    def test_fields(self):
+        version_1 = self.Version.create({'name': '1.0'})
+        version_2 = self.Version.create({'name': '2.0'})
+        bundle_1 = self.Bundle.create({'name': 'v1', 'project_id': self.project.id})
+        bundle_2 = self.Bundle.create({'name': 'v2', 'project_id': self.project.id})
+        batch_1 = self.Batch.create({'bundle_id': bundle_1.id})
+        batch_2 = self.Batch.create({'bundle_id': bundle_2.id})
+
+        params_1 = self.BuildParameters.create({
+            'version_id': version_1.id,
+            'project_id': self.project.id,
+            'config_id': self.default_config.id,
+            'create_batch_id': batch_1.id,
+        })
+        params_2 = self.BuildParameters.create({
+            'version_id': version_2.id,
+            'project_id': self.project.id,
+            'config_id': self.default_config.id,
+            'create_batch_id': batch_2.id,
+        })
+
+        build_1 = self.Build.create({
+            'local_result': 'ko',
+            'local_state': 'done',
+            'params_id': params_1.id,
+        })
+        build_2 = self.Build.create({
+            'local_result': 'ko',
+            'local_state': 'done',
+            'params_id': params_2.id,
+        })
+
+        self.env['runbot.batch.slot'].create({
+            'build_id': build_1.id,
+            'batch_id': batch_1.id,
+            'params_id': build_1.params_id.id,
+            'link_type': 'created',
+        })
+        self.env['runbot.batch.slot'].create({
+            'build_id': build_2.id,
+            'batch_id': batch_2.id,
+            'params_id': build_2.params_id.id,
+            'link_type': 'created',
+        })
+
+        error = self.BuildError.create({})
+        error_content_1 = self.BuildErrorContent.create({'content': 'foo bar v1', 'error_id': error.id})
+        error_content_2 = self.BuildErrorContent.create({'content': 'foo bar v2', 'error_id': error.id})
+        error_content_2b = self.BuildErrorContent.create({'content': 'bar v2', 'error_id': error.id})
+        l_1 = self.BuildErrorLink.create({'build_id': build_1.id, 'error_content_id': error_content_1.id})
+        l_2 = self.BuildErrorLink.create({'build_id': build_2.id, 'error_content_id': error_content_2.id})
+        l_3 = self.BuildErrorLink.create({'build_id': build_2.id, 'error_content_id': error_content_2b.id})
+
+        self.assertEqual(error_content_1.build_ids, build_1)
+        self.assertEqual(error_content_2.build_ids, build_2)
+        self.assertEqual(error_content_2b.build_ids, build_2)
+        self.assertEqual(error.build_ids, build_1 | build_2)
+
+        self.assertEqual(error_content_1.bundle_ids, bundle_1)
+        self.assertEqual(error_content_2.bundle_ids, bundle_2)
+        self.assertEqual(error_content_2b.bundle_ids, bundle_2)
+        self.assertEqual(error.bundle_ids, bundle_1 | bundle_2)
+
+        self.assertEqual(error_content_1.version_ids, version_1)
+        self.assertEqual(error_content_2.version_ids, version_2)
+        self.assertEqual(error_content_2b.version_ids, version_2)
+        self.assertEqual(error.version_ids, version_1 | version_2)
+
+        self.assertEqual(error_content_1.build_error_link_ids, l_1)
+        self.assertEqual(error_content_2.build_error_link_ids, l_2)
+        self.assertEqual(error_content_2b.build_error_link_ids, l_3)
+        self.assertEqual(error.build_error_link_ids, l_1 | l_2 | l_3)
+        self.assertEqual(error.unique_build_error_link_ids, l_1 | l_2)
+
+    def test_merge_test_tags(self):
+        error_a = self.BuildError.create({
+            'content': 'foo',
+        })
+        error_b = self.BuildError.create({
+            'content': 'bar',
+            'test_tags': 'blah',
+        })
+
+        self.assertEqual(self.BuildError._disabling_tags(), ['-blah'])
+
+        error_a._merge(error_b)
+
+        self.assertEqual(self.BuildError._disabling_tags(), ['-blah'])
+        self.assertEqual(error_a.test_tags, 'blah')
+        self.assertEqual(error_b.test_tags, False)
+        self.assertEqual(error_b.active, False)
+
+    def test_merge_contents(self):
         build_a = self.create_test_build({'local_result': 'ko', 'local_state': 'done'})
-        error_a = self.BuildError.create({'content': 'foo bar'})
-        self.BuildErrorLink.create({'build_id': build_a.id, 'build_error_id': error_a.id})
+        error_content_a = self.BuildErrorContent.create({'content': 'foo bar'})
+        self.BuildErrorLink.create({'build_id': build_a.id, 'error_content_id': error_content_a.id})
+        error_a = error_content_a.error_id
         build_b = self.create_test_build({'local_result': 'ko', 'local_state': 'done'})
-        error_b = self.BuildError.create({'content': 'foo bar'})
-        self.BuildErrorLink.create({'build_id': build_b.id, 'build_error_id': error_b.id})
-
-        (error_a | error_b)._merge()
-        self.assertEqual(len(self.BuildError.search([('fingerprint', '=', error_a.fingerprint)])), 1)
+        error_content_b = self.BuildErrorContent.create({'content': 'foo bar'})
+        self.BuildErrorLink.create({'build_id': build_b.id, 'error_content_id': error_content_b.id})
+        error_b = error_content_b.error_id
+        self.assertNotEqual(error_a, error_b)
+        self.assertEqual(self.BuildErrorContent.search([('fingerprint', '=', error_content_a.fingerprint)]), error_content_a | error_content_b)
+        (error_content_a | error_content_b)._merge()
+        self.assertEqual(self.BuildErrorContent.search([('fingerprint', '=', error_content_a.fingerprint)]), error_content_a)
         self.assertTrue(error_a.active, 'The first merged error should stay active')
         self.assertFalse(error_b.active, 'The second merged error should have stay deactivated')
         self.assertIn(build_a, error_a.build_error_link_ids.build_id)
@@ -107,50 +203,82 @@ class TestBuildError(RunbotCase):
         self.assertFalse(error_b.build_error_link_ids)
         self.assertFalse(error_b.build_ids)
 
-        error_c = self.BuildError.create({'content': 'foo foo'})
+        error_content_c = self.BuildErrorContent.create({'content': 'foo foo'})
 
         # let's ensure we cannot merge errors with different fingerprints
         with self.assertRaises(AssertionError):
-            (error_a | error_c)._merge()
+            (error_content_a | error_content_c)._merge()
 
         # merge two build errors while the build <--> build_error relation already exists
-        error_d = self.BuildError.create({'content': 'foo bar'})
-        self.BuildErrorLink.create({'build_id': build_a.id, 'build_error_id': error_d.id})
-        (error_a | error_d)._merge()
-        self.assertIn(build_a, error_a.build_error_link_ids.build_id)
-        self.assertIn(build_a, error_a.build_ids)
-        self.assertFalse(error_d.build_error_link_ids)
-        self.assertFalse(error_d.build_ids)
-
-    def test_merge_linked(self):
-        top_error = self.BuildError.create({'content': 'foo foo', 'active': False})
+        error_content_d = self.BuildErrorContent.create({'content': 'foo bar'})
+        self.BuildErrorLink.create({'build_id': build_a.id, 'error_content_id': error_content_d.id})
+        (error_content_a | error_content_d)._merge()
+        self.assertIn(build_a, error_content_a.build_error_link_ids.build_id)
+        self.assertIn(build_a, error_content_a.build_ids)
+        self.assertFalse(error_content_d.build_error_link_ids)
+        self.assertFalse(error_content_d.build_ids)
 
+    def test_merge_simple(self):
         build_a = self.create_test_build({'local_result': 'ko', 'local_state': 'done'})
-        error_a = self.BuildError.create({'content': 'foo bar', 'parent_id': top_error.id })
-        self.BuildErrorLink.create({'build_id': build_a.id, 'build_error_id': error_a.id})
-
+        error_content_a = self.BuildErrorContent.create({'content': 'foo bar'})
+        error_a = error_content_a.error_id
+        error_a.active = False
+        self.BuildErrorLink.create({'build_id': build_a.id, 'error_content_id': error_content_a.id})
         build_b = self.create_test_build({'local_result': 'ko', 'local_state': 'done'})
-        error_b = self.BuildError.create({'content': 'foo bar', 'test_tags': 'footag'})
-        self.BuildErrorLink.create({'build_id': build_b.id, 'build_error_id': error_b.id})
+        error_content_b = self.BuildErrorContent.create({'content': 'foo bar'})
+        error_b = error_content_b.error_id
+        error_b.test_tags = 'footag'
+        self.BuildErrorLink.create({'build_id': build_b.id, 'error_content_id': error_content_b.id})
 
-        linked_error = self.BuildError.create({'content': 'foo foo bar', 'parent_id': error_b.id})
+        self.assertEqual(self.BuildErrorContent.search([('fingerprint', '=', error_content_a.fingerprint)]), error_content_a | error_content_b)
+        (error_content_a | error_content_b)._merge()
+        self.assertEqual(self.BuildErrorContent.search([('fingerprint', '=', error_content_a.fingerprint)]), error_content_a)
+        self.assertFalse(error_b.error_content_ids)
 
-        (error_a | error_b)._merge()
-        self.assertEqual(len(self.BuildError.search([('fingerprint', '=', error_a.fingerprint)])), 1)
-        self.assertTrue(error_a.active, 'The first merged error should stay active')
-        self.assertFalse(error_b.active, 'The second merged error should have stay deactivated')
-        self.assertIn(build_a, error_a.build_ids)
-        self.assertIn(build_b, error_a.build_ids)
-        self.assertFalse(error_b.build_ids)
-        self.assertEqual(top_error.test_tags, 'footag')
-        self.assertEqual(top_error.active, True)
-        self.assertEqual(linked_error.parent_id, error_a, 'Linked errors to a merged one should be now linked to the new one')
+        self.assertTrue(error_a.active, 'The merged error should have been reactivated')
+        self.assertEqual(error_a.test_tags, 'footag', 'Tags should have been transferred from b to a')
+        self.assertFalse(error_b.active, 'The emptied error should have been deactivated')
+        self.assertIn(build_a, error_content_a.build_ids)
+        self.assertIn(build_b, error_content_a.build_ids)
+        self.assertFalse(error_content_b.build_ids)
+        self.assertEqual(error_a.active, True)
 
-        tagged_error = self.BuildError.create({'content': 'foo foo', 'test_tags': 'bartag'})
-        (top_error | tagged_error)._merge()
-        self.assertTrue(top_error.active)
+        tagged_error_content = self.BuildErrorContent.create({'content': 'foo bar'})
+        tagged_error = tagged_error_content.error_id
+        tagged_error.test_tags = 'bartag'
+        (error_content_a | tagged_error_content)._merge()
+        self.assertEqual(error_a.test_tags, 'footag')
+        self.assertEqual(tagged_error.test_tags, 'bartag')
+        self.assertTrue(error_a.active)
         self.assertTrue(tagged_error.active, 'A differently tagged error cannot be deactivated by the merge')
 
+    def test_merge_linked(self):
+        build_a = self.create_test_build({'local_result': 'ko', 'local_state': 'done'})
+        error_content_a = self.BuildErrorContent.create({'content': 'foo bar'})
+        error_a = error_content_a.error_id
+        error_a.active = False
+        self.BuildErrorLink.create({'build_id': build_a.id, 'error_content_id': error_content_a.id})
+        build_b = self.create_test_build({'local_result': 'ko', 'local_state': 'done'})
+        error_content_b = self.BuildErrorContent.create({'content': 'foo bar'})
+        error_b = error_content_b.error_id
+        error_b.test_tags = 'footag'
+        self.BuildErrorLink.create({'build_id': build_b.id, 'error_content_id': error_content_b.id})
+
+        linked_error = self.BuildErrorContent.create({'content': 'foo foo bar', 'error_id': error_b.id})
+
+        self.assertEqual(self.BuildErrorContent.search([('fingerprint', '=', error_content_a.fingerprint)]), error_content_a | error_content_b)
+        (error_content_a | error_content_b)._merge()
+        self.assertEqual(self.BuildErrorContent.search([('fingerprint', '=', error_content_a.fingerprint)]), error_content_a)
+        self.assertEqual(error_b.error_content_ids, linked_error)
+        self.assertTrue(error_a.active, 'Main error should have been reactivated')
+        self.assertEqual(error_a.test_tags, False, 'Tags should remain on b')
+        self.assertEqual(error_b.test_tags, 'footag', 'Tags should remain on b')
+        self.assertTrue(error_b.active, 'The merged error with test tags should remain active')
+        self.assertIn(build_a, error_content_a.build_ids)
+        self.assertIn(build_b, error_content_a.build_ids)
+        self.assertFalse(error_content_b.build_ids)
+        self.assertEqual(error_a.active, True)
+        self.assertEqual(linked_error.error_id, error_b)
 
     def test_build_scan(self):
         ko_build = self.create_test_build({'local_result': 'ok', 'local_state': 'testing'})
@@ -168,7 +296,7 @@ class TestBuildError(RunbotCase):
             'replacement': "''",
         })
 
-        error_team = self.BuildErrorTeam.create({
+        error_team = self.RunbotTeam.create({
             'name': 'test-error-team',
             'path_glob': '*/test_ui.py'
         })
@@ -193,22 +321,24 @@ class TestBuildError(RunbotCase):
         ok_build._parse_logs()
         build_error = ko_build.build_error_ids
         self.assertTrue(build_error)
-        self.assertTrue(build_error.fingerprint.startswith('af0e88f3'))
-        self.assertTrue(build_error.cleaned_content.startswith('%'), 'The cleaner should have replace "FAIL: " with a "%" sign by default')
-        self.assertFalse('^' in build_error.cleaned_content, 'The cleaner should have removed the "^" chars')
-        error_link = self.env['runbot.build.error.link'].search([('build_id', '=', ko_build.id), ('build_error_id', '=', build_error.id)])
+        error_content = build_error.error_content_ids
+        self.assertTrue(error_content.fingerprint.startswith('af0e88f3'))
+        self.assertTrue(error_content.cleaned_content.startswith('%'), 'The cleaner should have replaced "FAIL: " with a "%" sign by default')
+        self.assertFalse('^' in error_content.cleaned_content, 'The cleaner should have removed the "^" chars')
+        error_link = self.env['runbot.build.error.link'].search([('build_id', '=', ko_build.id), ('error_content_id', '=', error_content.id)])
         self.assertTrue(error_link, 'An error link should exists')
-        self.assertIn(ko_build, build_error.build_error_link_ids.mapped('build_id'), 'Ko build should be in build_error_link_ids')
+        self.assertIn(ko_build, error_content.build_ids, 'Ko build should be in build_error_link_ids')
         self.assertEqual(error_link.log_date, fields.Datetime.from_string('2023-08-29 00:46:21'))
-        self.assertIn(ko_build, build_error.build_ids, 'The parsed build should be added to the runbot.build.error')
+        self.assertIn(ko_build, error_content.build_ids, 'The parsed build should be added to the runbot.build.error')
         self.assertFalse(self.BuildErrorLink.search([('build_id', '=', ok_build.id)]), 'A successful build should not be associated to a runbot.build.error')
-        self.assertEqual(error_team, build_error.team_id)
+        self.assertEqual(error_content.file_path, '/data/build/server/addons/web_studio/tests/test_ui.py')
+        self.assertEqual(build_error.team_id, error_team)
 
         # Test that build with same error is added to the errors
         ko_build_same_error = self.create_test_build({'local_result': 'ko'})
         self.create_log({'create_date': fields.Datetime.from_string('2023-08-29 01:46:21'), 'message': RTE_ERROR, 'build_id': ko_build_same_error.id})
         ko_build_same_error._parse_logs()
-        self.assertIn(ko_build_same_error, build_error.build_ids, 'The parsed build should be added to the existing runbot.build.error')
+        self.assertIn(ko_build_same_error, error_content.build_ids, 'The parsed build should be added to the existing runbot.build.error')
 
         # Test that line numbers does not interfere with error recognition
         ko_build_diff_number = self.create_test_build({'local_result': 'ko'})
@@ -224,9 +354,9 @@ class TestBuildError(RunbotCase):
         self.create_log({'create_date': fields.Datetime.from_string('2023-08-29 01:46:21'), 'message': RTE_ERROR, 'build_id': ko_build_new.id})
         ko_build_new._parse_logs()
         self.assertNotIn(ko_build_new, build_error.build_ids, 'The parsed build should not be added to a fixed runbot.build.error')
-        new_build_error = self.BuildErrorLink.search([('build_id', '=', ko_build_new.id)]).mapped('build_error_id')
+        new_build_error = self.BuildErrorLink.search([('build_id', '=', ko_build_new.id)]).error_content_id.error_id
         self.assertIn(ko_build_new, new_build_error.build_ids, 'The parsed build with a re-apearing error should generate a new runbot.build.error')
-        self.assertIn(build_error, new_build_error.error_history_ids, 'The old error should appear in history')
+        self.assertEqual(build_error, new_build_error.previous_error_id, 'The old error should appear in history')
 
     def test_seen_date(self):
         # create all the records before the tests to evaluate compute dependencies
@@ -261,9 +391,9 @@ class TestBuildError(RunbotCase):
         # a new build error is linked to the current one
         build_c._parse_logs()
         build_error_c = build_c.build_error_ids
-        self.assertNotIn(build_c, build_error_a.children_build_ids)
-        build_error_c.parent_id = build_error_a
-        self.assertIn(build_c, build_error_a.children_build_ids)
+        self.assertNotIn(build_c, build_error_a.build_ids)
+        build_error_a._merge(build_error_c)
+        self.assertIn(build_c, build_error_a.build_ids)
         self.assertEqual(build_error_a.last_seen_date, child_seen_date)
         self.assertEqual(build_error_a.last_seen_build_id, build_c)
 
@@ -276,40 +406,28 @@ class TestBuildError(RunbotCase):
         build_a = self.create_test_build({'local_result': 'ko'})
         build_b = self.create_test_build({'local_result': 'ko'})
 
-        error_a = self.env['runbot.build.error'].create({
+        error_content_a = self.env['runbot.build.error.content'].create({
             'content': 'foo',
-            'active': False  # Even a fixed error coul be linked
         })
-        self.BuildErrorLink.create({'build_id': build_a.id, 'build_error_id': error_a.id})
-
-        error_b = self.env['runbot.build.error'].create({
+        self.BuildErrorLink.create({'build_id': build_a.id, 'error_content_id': error_content_a.id})
+        error_content_b = self.env['runbot.build.error.content'].create({
             'content': 'bar',
             'random': True
         })
-
-        self.BuildErrorLink.create({'build_id': build_b.id, 'build_error_id': error_b.id})
+        self.BuildErrorLink.create({'build_id': build_b.id, 'error_content_id': error_content_b.id})
 
         # test that the random bug is parent when linking errors
-        all_errors = error_a | error_b
-        all_errors.action_link_errors()
-        self.assertEqual(error_b.child_ids, error_a, 'Random error should be the parent')
-
-        # Test that changing bug resolution is propagated to children
-        error_b.active = True
-        self.assertTrue(error_a.active)
-        error_b.active = False
-        self.assertFalse(error_a.active)
+        self.assertNotEqual(error_content_a.error_id, error_content_b.error_id)
+        all_errors = error_content_a | error_content_b
+        all_errors.action_link_errors_contents()
+        self.assertEqual(error_content_a.error_id, error_content_b.error_id, 'Error should be linked')
 
         # Test build_ids
-        self.assertIn(build_b, error_b.build_ids)
-        self.assertNotIn(build_a, error_b.build_ids)
-
-        # Test that children builds contains all builds
-        self.assertIn(build_b, error_b.children_build_ids)
-        self.assertIn(build_a, error_b.children_build_ids)
-        self.assertEqual(error_a.build_count, 1)
-        self.assertEqual(error_b.build_count, 2)
+        self.assertEqual(build_a, error_content_a.build_ids)
+        self.assertEqual(build_b, error_content_b.build_ids)
+        error = error_content_a.error_id
+        self.assertEqual(build_a | build_b, error.build_ids)
 
     def test_build_error_test_tags_no_version(self):
         build_a = self.create_test_build({'local_result': 'ko'})
@@ -337,12 +455,6 @@ class TestBuildError(RunbotCase):
         # test that test tags on fixed errors are not taken into account
         self.assertNotIn('-blah', self.BuildError._disabling_tags())
 
-        error_a.test_tags = False
-        error_b.active = True
-        error_b.parent_id = error_a.id
-        self.assertEqual(error_b.test_tags, False)
-        self.assertEqual(self.BuildError._disabling_tags(), ['-blah'])
-
     def test_build_error_test_tags_min_max_version(self):
         version_17 = self.Version.create({'name': '17.0'})
         version_saas_171 = self.Version.create({'name': 'saas-17.1'})
@@ -389,7 +501,7 @@ class TestBuildError(RunbotCase):
         self.assertEqual(sorted(['-every', '-where', '-tag_17_up_to_master']), sorted(self.BuildError._disabling_tags(build_master)))
 
     def test_build_error_team_wildcards(self):
-        website_team = self.BuildErrorTeam.create({
+        website_team = self.RunbotTeam.create({
            'name': 'website_test',
            'path_glob': '*website*,-*website_sale*'
        })
@@ -402,11 +514,11 @@ class TestBuildError(RunbotCase):
        self.assertEqual(website_team, teams._get_team('/data/build/odoo/addons/website/tests/test_ui'))

    def test_build_error_team_ownership(self):
-        website_team = self.BuildErrorTeam.create({
+        website_team = self.RunbotTeam.create({
            'name': 'website_test',
            'path_glob': ''
        })
-        sale_team = self.BuildErrorTeam.create({
+        sale_team = self.RunbotTeam.create({
            'name': 'sale_test',
            'path_glob': ''
        })
diff --git a/runbot/views/build_error_views.xml b/runbot/views/build_error_views.xml
index e2b52c6d..c0cb41da 100644
--- a/runbot/views/build_error_views.xml
+++ b/runbot/views/build_error_views.xml
@@ -1,35 +1,115 @@
+
+        runbot.build.error.form
+        runbot.build.error
+
+
+
+