[IMP] runbot: better log listing

The current logic to decide whether a build step has logs is based on
is_docker_step. This heuristic is not always valid: it relies on the
step type and the step content, but there are many ways for a step to
end up starting a docker container, one of them being to call another
step. The main issue is that we don't always know, before runtime,
whether that other step is a docker step. Moreover, the logic became
more and more complex as more conditions were added (commands, _run_,
docker_params, ...).
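
For reference, the heuristic being removed (see the _is_docker_step
deletion in the diff below) boils down to the step type plus substring
checks on the step's python code, which cannot see through a step that
merely delegates to another step:

    # Simplified restatement of the removed _is_docker_step heuristic;
    # the substring checks cannot know what a delegated step does at runtime.
    def _is_docker_step(self):
        return self.job_type in ('install_odoo', 'run_odoo', 'restore', 'test_upgrade') or (
            self.job_type == 'python' and (
                'docker_params =' in self.python_code
                or '_run_' in self.python_code
                or 'cmd' in self.python_code
            )
        )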

Moreover, the log list is computed before the build starts, meaning
that if the build is killed before completion, some logs may be listed
even though they were never produced. This is also an issue when trying
to access logs before the corresponding step has run.

This commit changes the logic by adding the log file to the list (along
with the start log link) when a docker container is actually started,
which should give a more consistent result.
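
A minimal sketch of the resulting flow, condensed from the
ConfigStep._run changes in the diff below (illustrative only, unrelated
details omitted):

    # Condensed from the new ConfigStep._run in the diff below: the step
    # name is appended to log_list only once _run_step actually returns a
    # docker callable, and the log file link is attached to the start log.
    def _run(self, build):
        log = build._log('run', f'Starting step **{self.name}**', log_type='markdown', level='SEPARATOR')
        result = self._run_step(build)
        if callable(result):  # a docker was started, so a text log file will exist
            build.log_list = f'{build.log_list},{self.name}' if build.log_list else self.name
            url = f"http://{build.host}/runbot/static/build/{build.dest}/logs/{self.name}.txt"
            log.message = f'{log.message} [@icon-file-text]({url})'
        return result
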
Xavier-Do 2024-04-22 14:43:37 +02:00
parent 0eae784a1e
commit 1052baeadd
3 changed files with 16 additions and 26 deletions


@@ -181,7 +181,7 @@ class BuildResult(models.Model):
     log_ids = fields.One2many('ir.logging', 'build_id', string='Logs')
     error_log_ids = fields.One2many('ir.logging', 'build_id', domain=[('level', 'in', ['WARNING', 'ERROR', 'CRITICAL'])], string='Error Logs')
     stat_ids = fields.One2many('runbot.build.stat', 'build_id', string='Statistics values')
-    log_list = fields.Char('Comma separted list of step_ids names with logs', compute="_compute_log_list", store=True)
+    log_list = fields.Char('Comma separted list of step_ids names with logs')
     active_step = fields.Many2one('runbot.build.config.step', 'Active step')
     job = fields.Char('Active step display name', compute='_compute_job')
@@ -250,12 +250,6 @@ class BuildResult(models.Model):
         for record in self:
             record.host_id = get_host(record.host)
-    @api.depends('params_id.config_id')
-    def _compute_log_list(self):  # storing this field because it will be access trhoug repo viewn and keep track of the list at create
-        for build in self:
-            build.log_list = ','.join({step.name for step in build.params_id.config_id.step_ids if step._has_log()})
-        # TODO replace logic, add log file to list when executed (avoid 404, link log on docker start, avoid fake is_docker_step)
     @api.depends('children_ids.global_state', 'local_state')
     def _compute_global_state(self):
         for record in self:
@@ -742,8 +736,8 @@ class BuildResult(models.Model):
                 build._log('_schedule', '%s time exceeded (%ss)' % (build.active_step.name if build.active_step else "?", build.job_time))
                 build._kill(result='killed')
                 return False
-            elif _docker_state in ('UNKNOWN', 'GHOST') and (build.local_state == 'running' or build.active_step._is_docker_step()):  # todo replace with docker_start
-                docker_time = time.time() - dt2time(build.docker_start or build.job_start)
+            elif _docker_state in ('UNKNOWN', 'GHOST') and build.docker_start:
+                docker_time = time.time() - dt2time(build.docker_start)
                 if docker_time < 5:
                     return False
                 elif docker_time < 60:
@@ -984,7 +978,7 @@ class BuildResult(models.Model):
         self.ensure_one()
         _logger.info("Build %s %s %s", self.id, func, message)
-        self.env['ir.logging'].create({
+        return self.env['ir.logging'].create({
             'build_id': self.id,
             'level': level,
             'type': log_type,


@@ -256,13 +256,18 @@ class ConfigStep(models.Model):
     def _run(self, build):
         build.write({'job_start': now(), 'job_end': False})  # state, ...
-        log_link = ''
-        if self._has_log():
+        log = build._log('run', f'Starting step **{self.name}** from config **{build.params_id.config_id.name}**', log_type='markdown', level='SEPARATOR')
+        result = self._run_step(build)
+        if callable(result):  # docker step, should have text logs
+            if build.log_list:
+                build.log_list = f'{build.log_list},{self.name}'
+            else:
+                build.log_list = self.name
             log_url = f'http://{build.host}'
             url = f"{log_url}/runbot/static/build/{build.dest}/logs/{self.name}.txt"
             log_link = f'[@icon-file-text]({url})'
-        build._log('run', 'Starting step **%s** from config **%s** %s' % (self.name, build.params_id.config_id.name, log_link), log_type='markdown', level='SEPARATOR')
-        return self._run_step(build)
+            log.message = f'{log.message} {log_link}'
+        return result
     def _run_step(self, build, **kwargs):
         build.log_counter = self.env['ir.config_parameter'].sudo().get_param('runbot.runbot_maxlogs', 100)
@@ -337,12 +342,6 @@ class ConfigStep(models.Model):
             else:
                 raise
-    def _is_docker_step(self):
-        if not self:
-            return False
-        self.ensure_one()
-        return self.job_type in ('install_odoo', 'run_odoo', 'restore', 'test_upgrade') or (self.job_type == 'python' and ('docker_params =' in self.python_code or '_run_' in self.python_code or 'cmd' in self.python_code))
     def _run_run_odoo(self, build, force=False):
         if not force:
             if build.parent_id:
@@ -1113,10 +1112,6 @@ class ConfigStep(models.Model):
             return 'running'
         return 'testing'
-    def _has_log(self):
-        self.ensure_one()
-        return self._is_docker_step()
     def _check_limits(self, build):
         bundle = build.params_id.create_batch_id.bundle_id
         commit_limit = bundle.commit_limit or self.commit_limit


@@ -28,7 +28,8 @@ class TestSchedule(RunbotCase):
             'host': host.name,
             'job_start': datetime.datetime.now(),
             'active_step': self.env.ref('runbot.runbot_build_config_step_run').id,
-            'params_id': params.id
+            'docker_start': datetime.datetime.now(),
+            'params_id': params.id,
         })
         mock_docker_state.return_value = 'UNKNOWN'
         self.assertEqual(build.local_state, 'testing')
@@ -37,7 +38,7 @@ class TestSchedule(RunbotCase):
         self.assertEqual(build.local_result, 'ok')
         self.start_patcher('fetch_local_logs', 'odoo.addons.runbot.models.host.Host._fetch_local_logs', [])  # the local logs have to be empty
-        build.write({'job_start': datetime.datetime.now() - datetime.timedelta(seconds=70)})  # docker never started
+        build.write({'docker_start': datetime.datetime.now() - datetime.timedelta(seconds=70)})  # docker never started
         build._schedule()
         self.assertEqual(build.local_state, 'done')
         self.assertEqual(build.local_result, 'ko')