Mirror of https://github.com/odoo/runbot.git (synced 2025-03-15 23:45:44 +07:00)
[IMP] info: log level debug to info
Runbot servers run with log-level debug in order to get useful debug information, but this also causes some noise coming from Odoo. This PR changes most debug logs to info.
parent 011bdc00e2
commit 87214a82a6
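
For context, a minimal sketch of the pattern applied throughout this diff (the function name below is a made-up placeholder, not actual runbot code): a module-level logger whose call site is promoted from debug to info so the message stays visible when the server runs at the usual INFO level, without enabling DEBUG output from every other module.

    import logging

    _logger = logging.getLogger(__name__)

    def log_docker_run(run_cmd):
        # Before: emitted only when the effective log level is DEBUG.
        # _logger.debug('Docker run command: %s', run_cmd)
        # After: emitted at INFO, so it shows up in normal server logs.
        _logger.info('Docker run command: %s', run_cmd)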
@@ -142,7 +142,7 @@ def _docker_run(cmd=False, log_path=False, build_dir=False, container_name=False
         run_cmd = cmd_object.build()
     else:
         cmd_object = Command([], run_cmd.split(' '), [])
-    _logger.debug('Docker run command: %s', run_cmd)
+    _logger.info('Docker run command: %s', run_cmd)
     logs = open(log_path, 'w')
     run_cmd = 'cd /data/build;touch start-%s;%s;cd /data/build;touch end-%s' % (container_name, run_cmd, container_name)
     docker_clear_state(container_name, build_dir)  # ensure that no state are remaining
@@ -205,9 +205,10 @@ class Runbot(Controller):
         '/runbot/bundle/<model("runbot.bundle"):bundle>/force',
         '/runbot/bundle/<model("runbot.bundle"):bundle>/force/<int:auto_rebase>',
     ], type='http', auth="user", methods=['GET', 'POST'], csrf=False)
-    def force_bundle(self, bundle, auto_rebase=False, **post):
+    def force_bundle(self, bundle, auto_rebase=False, **_post):
+        _logger.info('user %s forcing bundle %s', request.env.user.name, bundle.name)  # user must be able to read bundle
         batch = bundle.sudo()._force()
         batch._log('Batch forced by %s', request.env.user.name)
         batch._prepare(auto_rebase)
         return werkzeug.utils.redirect('/runbot/batch/%s' % batch.id)
@@ -14,6 +14,7 @@ class Hook(http.Controller):
 
     @http.route(['/runbot/hook/<int:remote_id>', '/runbot/hook/org'], type='http', auth="public", website=True, csrf=False)
     def hook(self, remote_id=None, **post):
+        _logger.info('Hook on %s', remote_id)
         event = request.httprequest.headers.get("X-Github-Event")
         payload = json.loads(request.params.get('payload', '{}'))
         if remote_id is None:
@@ -39,7 +40,7 @@ class Hook(http.Controller):
             if payload and payload.get('action', '') == 'edited' and 'base' in payload.get('changes'):
                 # handle PR that have been re-targeted
                 branch._compute_branch_infos(payload.get('pull_request', {}))
-                _logger.info('retargeting %s to %s', branch.name, branch.target_branch_name)
+                _logger.info('Retargeting %s to %s', branch.name, branch.target_branch_name)
                 base = request.env['runbot.bundle'].search([
                     ('name', '=', branch.target_branch_name),
                     ('is_base', '=', True),
@@ -58,7 +59,7 @@ class Hook(http.Controller):
                 _logger.info('Closing pr %s', branch.name)
                 branch.alive = False
             else:
-                _logger.debug('Ignoring unsupported pull request operation %s %s', event, payload.get('action', ''))
+                _logger.info('Ignoring unsupported pull request operation %s %s', event, payload.get('action', ''))
         elif event == 'delete':
             if payload.get('ref_type') == 'branch':
                 branch_ref = payload.get('ref')
@@ -66,5 +67,5 @@ class Hook(http.Controller):
                 branch = request.env['runbot.branch'].sudo().search([('remote_id', '=', remote.id), ('name', '=', branch_ref)])
                 branch.alive = False
         else:
-            _logger.debug('Ignoring unsupported hook %s %s', event, payload.get('action', ''))
+            _logger.info('Ignoring unsupported hook %s %s', event, payload.get('action', ''))
         return ""
@@ -481,13 +481,13 @@ class BuildResult(models.Model):
             elif dest != hide_in_logs:
                 ignored.add(dest)
         if ignored:
-            _logger.debug('%s (%s) not deleted because not dest format', label, list(ignored))
+            _logger.info('%s (%s) not deleted because not dest format', label, list(ignored))
         builds = self.browse(dest_by_builds_ids)
         existing = builds.exists()
         remaining = (builds - existing)
         if remaining:
             dest_list = [dest for sublist in [dest_by_builds_ids[rem_id] for rem_id in remaining.ids] for dest in sublist]
-            _logger.debug('(%s) (%s) not deleted because no corresponding build found', label, " ".join(dest_list))
+            _logger.info('(%s) (%s) not deleted because no corresponding build found', label, " ".join(dest_list))
         for build in existing:
             if fields.Datetime.from_string(build.gc_date) < datetime.datetime.now():
                 if build.local_state == 'done':
@@ -501,7 +501,7 @@ class BuildResult(models.Model):
         Remove datadir and drop databases of build older than db_gc_days or db_gc_days_child.
         If force is set to True, does the same cleaning based on recordset without checking build age.
         """
-        _logger.debug('Local cleaning')
+        _logger.info('Local cleaning')
         _filter = self._filter_to_clean
         additionnal_conditions = []
 
@@ -512,7 +512,7 @@ class BuildResult(models.Model):
                     if build and build in self:
                         yield dest
                     elif not build:
-                        _logger.debug('%s (%s) skipped because not dest format', label, dest)
+                        _logger.info('%s (%s) skipped because not dest format', label, dest)
             _filter = filter_ids
             for _id in self.exists().ids:
                 additionnal_conditions.append("datname like '%s-%%'" % _id)
@@ -564,7 +564,7 @@ class BuildResult(models.Model):
         l = list(l)
         for build in self:
             l[0] = "%s %s" % (build.dest, l[0])
-            _logger.debug(*l)
+            _logger.info(*l)
 
     def _get_docker_name(self):
         self.ensure_one()
@@ -667,7 +667,7 @@ class BuildResult(models.Model):
             if build.job_time < 5:
                 continue
             elif build.job_time < 60:
-                _logger.debug('container "%s" seems too take a while to start :%s' % (build.job_time, build._get_docker_name()))
+                _logger.info('container "%s" seems too take a while to start :%s' % (build.job_time, build._get_docker_name()))
                 continue
             else:
                 build._log('_schedule', 'Docker with state %s not started after 60 seconds, skipping' % _docker_state, level='ERROR')
@@ -824,14 +824,14 @@ class BuildResult(models.Model):
         datadir = appdirs.user_data_dir()
         paths = [os.path.join(datadir, pn, 'filestore', dbname) for pn in 'OpenERP Odoo'.split()]
         cmd = ['rm', '-rf'] + paths
-        _logger.debug(' '.join(cmd))
+        _logger.info(' '.join(cmd))
         subprocess.call(cmd)
 
     def _local_pg_createdb(self, dbname):
         icp = self.env['ir.config_parameter']
         db_template = icp.get_param('runbot.runbot_db_template', default='template0')
         self._local_pg_dropdb(dbname)
-        _logger.debug("createdb %s", dbname)
+        _logger.info("createdb %s", dbname)
         with local_pgadmin_cursor() as local_cr:
             local_cr.execute(sql.SQL("""CREATE DATABASE {} TEMPLATE %s LC_COLLATE 'C' ENCODING 'unicode'""").format(sql.Identifier(dbname)), (db_template,))
         self.env['runbot.database'].create({'name': dbname, 'build_id': self.id})
@@ -842,7 +842,7 @@ class BuildResult(models.Model):
             message = message[:300000] + '[Truncate, message too long]'
 
         self.ensure_one()
-        _logger.debug("Build %s %s %s", self.id, func, message)
+        _logger.info("Build %s %s %s", self.id, func, message)
         self.env['ir.logging'].create({
             'build_id': self.id,
             'level': level,
@@ -1104,7 +1104,7 @@ class BuildResult(models.Model):
             # TODO maybe avoid to send status if build is killable (another new build exist and will send the status)
             if build.parent_id:
                 if build.orphan_result:
-                    _logger.debug('Skipping result for orphan build %s', self.id)
+                    _logger.info('Skipping result for orphan build %s', self.id)
                 else:
                     build.parent_id._github_status(post_commit)
             elif build.params_id.config_id == build.params_id.trigger_id.config_id:
@@ -1117,7 +1117,7 @@ class BuildResult(models.Model):
                 if build.global_result == 'ok':
                     state = 'success'
             else:
                _logger.debug("skipping github status for build %s ", build.id)
-                _logger.debug("skipping github status for build %s ", build.id)
+                _logger.info("skipping github status for build %s ", build.id)
                 continue
 
             runbot_domain = self.env['runbot.runbot']._domain()
@@ -205,7 +205,7 @@ class CommitStatus(models.Model):
 
         def send_github_status(env):
             for remote in env['runbot.remote'].browse(remote_ids):
-                _logger.debug(
+                _logger.info(
                     "github updating %s status %s to %s in repo %s",
                     status['context'], commit_name, status['state'], remote.name)
                 remote._github('/repos/:owner/:repo/statuses/%s' % commit_name, status, ignore_errors=True)
@@ -102,7 +102,7 @@ class Host(models.Model):
         return int(icp.get_param('runbot.runbot_running_max', default=5))
 
     def set_psql_conn_count(self):
-        _logger.debug('Updating psql connection count...')
+        _logger.info('Updating psql connection count...')
         self.ensure_one()
         with local_pgadmin_cursor() as local_cr:
             local_cr.execute("SELECT sum(numbackends) FROM pg_stat_database;")
@@ -173,21 +173,21 @@ class Runbot(models.AbstractModel):
             with open(nginx_conf_path, 'rb') as f:
                 content = f.read()
             if content != nginx_config:
-                _logger.debug('reload nginx')
+                _logger.info('reload nginx')
                 with open(nginx_conf_path, 'wb') as f:
                     f.write(nginx_config)
                 try:
                     pid = int(open(os.path.join(nginx_dir, 'nginx.pid')).read().strip(' \n'))
                     os.kill(pid, signal.SIGHUP)
                 except Exception:
-                    _logger.debug('start nginx')
+                    _logger.info('start nginx')
                     if subprocess.call(['/usr/sbin/nginx', '-p', nginx_dir, '-c', 'nginx.conf']):
                         # obscure nginx bug leaving orphan worker listening on nginx port
                         if not subprocess.call(['pkill', '-f', '-P1', 'nginx: worker']):
-                            _logger.debug('failed to start nginx - orphan worker killed, retrying')
+                            _logger.warning('failed to start nginx - orphan worker killed, retrying')
                             subprocess.call(['/usr/sbin/nginx', '-p', nginx_dir, '-c', 'nginx.conf'])
                         else:
-                            _logger.debug('failed to start nginx - failed to kill orphan worker - oh well')
+                            _logger.warning('failed to start nginx - failed to kill orphan worker - oh well')
 
     def _get_cron_period(self):
         """ Compute a randomized cron period with a 2 min margin below
@@ -349,7 +349,7 @@ class Runbot(models.AbstractModel):
                 docker_stop(containers[c.id], c._path())
         ignored = {dc for dc in docker_ps_result if not dest_reg.match(dc)}
         if ignored:
-            _logger.debug('docker (%s) not deleted because not dest format', list(ignored))
+            _logger.info('docker (%s) not deleted because not dest format', list(ignored))
 
     def warning(self, message, *args):
         if args:
@@ -342,7 +342,7 @@ class TestBuildResult(RunbotCase):
         self.assertEqual(other_build.local_state, 'done')
         self.assertEqual(other_build.local_result, 'skipped')
         log_first_part = '%s skip %%s' % (other_build.dest)
-        mock_logger.debug.assert_called_with(log_first_part, 'A good reason')
+        mock_logger.info.assert_called_with(log_first_part, 'A good reason')
 
     def test_children(self):
         build1 = self.Build.create({