# -*- coding: utf-8 -*-

import datetime
import dateutil
import glob
import json
import logging
import os
import random
import re
import requests
import shutil
import signal
import subprocess
import time

from odoo.exceptions import UserError, ValidationError
from odoo.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
from odoo import models, fields, api, registry
from odoo.modules.module import get_module_resource
from odoo.tools import config
from ..common import fqdn, dt2time, Commit
from psycopg2.extensions import TransactionRollbackError

_logger = logging.getLogger(__name__)


class HashMissingException(Exception):
    pass


class runbot_repo(models.Model):

    _name = "runbot.repo"

    name = fields.Char('Repository', required=True)
    short_name = fields.Char('Repository', compute='_compute_short_name', store=False, readonly=True)
    sequence = fields.Integer('Sequence')
    path = fields.Char(compute='_get_path', string='Directory', readonly=True)
    base = fields.Char(compute='_get_base_url', string='Base URL', readonly=True)  # could be renamed to a more explicit name like base_url
    nginx = fields.Boolean('Nginx')
    mode = fields.Selection([('disabled', 'Disabled'),
                             ('poll', 'Poll'),
                             ('hook', 'Hook')],
                            default='poll',
                            string="Mode", required=True, help="hook: wait for a webhook on /runbot/hook/<id>, i.e. a github push event")
    hook_time = fields.Float('Last hook time')
    get_ref_time = fields.Float('Last refs db update')
    duplicate_id = fields.Many2one('runbot.repo', 'Duplicate repo', help='Repository for finding duplicate builds')
    modules = fields.Char("Modules to install", help="Comma-separated list of modules to install and test.")
    modules_auto = fields.Selection([('none', 'None (only explicit modules list)'),
                                     ('repo', 'Repository modules (excluding dependencies)'),
                                     ('all', 'All modules (including dependencies)')],
                                    default='all',
                                    string="Other modules to install automatically")

    dependency_ids = fields.Many2many(
        'runbot.repo', 'runbot_repo_dep_rel', column1='dependant_id', column2='dependency_id',
        string='Extra dependencies',
        help="Community addon repos which need to be present to run tests.")
    token = fields.Char("Github token", groups="runbot.group_runbot_admin")
    group_ids = fields.Many2many('res.groups', string='Limited to groups')

    repo_config_id = fields.Many2one('runbot.build.config', 'Run Config')
    config_id = fields.Many2one('runbot.build.config', 'Run Config', compute='_compute_config_id', inverse='_inverse_config_id')

    server_files = fields.Char('Server files', help='Comma separated list of possible server files')  # odoo-bin,openerp-server,openerp-server.py
    manifest_files = fields.Char('Manifest files', help='Comma separated list of possible manifest files', default='__manifest__.py')
    addons_paths = fields.Char('Addons paths', help='Comma separated list of possible addons paths', default='')

    def _compute_config_id(self):
        for repo in self:
            if repo.repo_config_id:
                repo.config_id = repo.repo_config_id
            else:
                repo.config_id = self.env.ref('runbot.runbot_build_config_default')

    def _inverse_config_id(self):
        for repo in self:
            repo.repo_config_id = repo.config_id

    def _root(self):
        """Return root directory of repository"""
        default = os.path.join(os.path.dirname(__file__), '../static')
        return os.path.abspath(default)

    def _source_path(self, sha, *path):
        """
        Return the absolute path to the source folder of the repo,
        optionally joining extra *path components.
        """
        self.ensure_one()
        return os.path.join(self._root(), 'sources', self._get_repo_name_part(), sha, *path)
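
    # Illustrative sketch (assumed example values, not in the original
    # module): for a repo named 'git@github.com:odoo/odoo.git' and sha
    # 'd0d0caca', the shared sources live under
    #   <module>/static/sources/odoo.git/d0d0caca/
    # Builds then mount this directory read-only instead of checking out
    # their own copy.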

    @api.depends('name')
    def _get_path(self):
        """Compute the server path of the repo from its name."""
        root = self._root()
        for repo in self:
            repo.path = os.path.join(root, 'repo', repo._sanitized_name(repo.name))

    @api.model
    def _sanitized_name(self, name):
        for i in '@:/':
            name = name.replace(i, '_')
        return name
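
    # Example (illustrative, not in the original module):
    #   _sanitized_name('git@github.com:odoo/odoo.git')
    #   -> 'git_github.com_odoo_odoo.git'
    # so the bare clone of that repo lives in static/repo/git_github.com_odoo_odoo.git.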

    @api.depends('name')
    def _get_base_url(self):
        for repo in self:
            name = re.sub('.+@', '', repo.name)
            name = re.sub('^https://', '', name)  # support https repo style
            name = re.sub('.git$', '', name)
            name = name.replace(':', '/')
            repo.base = name
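
    # Illustrative examples (assumed repo names, not in the original module):
    #   'git@github.com:odoo/odoo.git'     -> base 'github.com/odoo/odoo'
    #   'https://github.com/odoo/odoo.git' -> base 'github.com/odoo/odoo'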

    @api.depends('name', 'base')
    def _compute_short_name(self):
        for repo in self:
            repo.short_name = '/'.join(repo.base.split('/')[-2:])

    def _get_repo_name_part(self):
        self.ensure_one()
        return self._sanitized_name(self.name.split('/')[-1])

    def _git(self, cmd):
        """Execute a git command 'cmd'"""
        self.ensure_one()
        cmd = ['git', '--git-dir=%s' % self.path] + cmd
        _logger.debug("git command: %s", ' '.join(cmd))
        return subprocess.check_output(cmd).decode('utf-8')
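
    # Example (illustrative, not in the original module):
    #   repo._git(['rev-parse', 'refs/heads/master'])
    # runs `git --git-dir=<repo.path> rev-parse refs/heads/master` against
    # the bare clone and returns its stdout as a unicode string.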

    def _git_rev_parse(self, branch_name):
        return self._git(['rev-parse', branch_name]).strip()

    def _git_export(self, sha):
        """Export a git commit into the shared sources directory."""
        # TODO add automated tests
        self.ensure_one()
        export_path = self._source_path(sha)

        if os.path.isdir(export_path):
            _logger.info('git export: checking out to %s (already exists)' % export_path)
            return export_path

        if not self._hash_exists(sha):
            self._update(force=True)
            if not self._hash_exists(sha):
                try:
                    self._git(['fetch', 'origin', sha])
                except Exception:  # the fetch may legitimately fail; the hash is re-checked below
                    pass
                if not self._hash_exists(sha):
                    raise HashMissingException()

        _logger.info('git export: checking out to %s (new)' % export_path)
        os.makedirs(export_path)
        p1 = subprocess.Popen(['git', '--git-dir=%s' % self.path, 'archive', sha], stdout=subprocess.PIPE)
        p2 = subprocess.Popen(['tar', '-xmC', export_path], stdin=p1.stdout, stdout=subprocess.PIPE)
        p1.stdout.close()  # Allow p1 to receive a SIGPIPE if p2 exits.
        p2.communicate()[0]
        # TODO get result and fall back on cleaning in case of problem
        return export_path
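
    # The export pipes `git archive <sha>` into `tar -x` instead of doing a
    # full checkout: no .git directory is materialized, so the resulting tree
    # is a plain snapshot that docker can mount as a read-only volume.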

    def _hash_exists(self, commit_hash):
        """Verify that a commit hash exists in the repo"""
        self.ensure_one()
        try:
            self._git(['cat-file', '-e', commit_hash])
        except subprocess.CalledProcessError:
            return False
        return True

    def _github(self, url, payload=None, ignore_errors=False, nb_tries=2):
        """Send a request to the github API and return the JSON response."""
        for repo in self:
            if not repo.token:
                return
            match_object = re.search('([^/]+)/([^/]+)/([^/.]+(.git)?)', repo.base)
            if match_object:
                url = url.replace(':owner', match_object.group(2))
                url = url.replace(':repo', match_object.group(3))
                url = 'https://api.%s%s' % (match_object.group(1), url)
                session = requests.Session()
                session.auth = (repo.token, 'x-oauth-basic')
                session.headers.update({'Accept': 'application/vnd.github.she-hulk-preview+json'})
                try_count = 0
                while try_count < nb_tries:
                    try:
                        if payload:
                            response = session.post(url, data=json.dumps(payload))
                        else:
                            response = session.get(url)
                        response.raise_for_status()
                        if try_count > 0:
                            _logger.info('Success after %s tries' % (try_count + 1))
                        return response.json()
                    except Exception:
                        try_count += 1
                        if try_count < nb_tries:
                            time.sleep(2)
                        else:
                            if ignore_errors:
                                _logger.exception('Ignored github error %s %r (try %s/%s)' % (url, payload, try_count + 1, nb_tries))
                            else:
                                raise
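
    # Illustrative example (assumed values, not in the original module): with
    # repo.base == 'github.com/odoo/odoo', a call such as
    #   repo._github('/repos/:owner/:repo/statuses/abc123', payload={...})
    # posts to 'https://api.github.com/repos/odoo/odoo/statuses/abc123',
    # retrying up to nb_tries times with a 2s pause between attempts.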

    def _get_fetch_head_time(self):
        self.ensure_one()
        fname_fetch_head = os.path.join(self.path, 'FETCH_HEAD')
        if os.path.exists(fname_fetch_head):
            return os.path.getmtime(fname_fetch_head)

    def _get_refs(self):
        """Find new refs
        :return: list of tuples with the following ref information:
                 name, sha, date, author, author_email, subject, committer, committer_email
        """
        self.ensure_one()

        get_ref_time = self._get_fetch_head_time()
        if not self.get_ref_time or get_ref_time > self.get_ref_time:
            self.get_ref_time = get_ref_time
            fields = ['refname', 'objectname', 'committerdate:iso8601', 'authorname', 'authoremail', 'subject', 'committername', 'committeremail']
            fmt = "%00".join(["%(" + field + ")" for field in fields])
            git_refs = self._git(['for-each-ref', '--format', fmt, '--sort=-committerdate', 'refs/heads', 'refs/pull'])
            git_refs = git_refs.strip()
            return [tuple(field for field in line.split('\x00')) for line in git_refs.split('\n')]
        else:
            return []
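
    # The format string built above expands to (illustrative):
    #   '%(refname)%00%(objectname)%00%(committerdate:iso8601)%00...'
    # so `git for-each-ref` prints one NUL-separated record per ref, which the
    # list comprehension splits back into tuples on '\x00'.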

    def _find_or_create_branches(self, refs):
        """Parse refs and create branches that do not exist yet
        :param refs: list of tuples returned by _get_refs()
        :return: dict {branch.name: branch.id}
        The returned structure contains all the branches from refs, newly
        created or pre-existing.
        """
        Branch = self.env['runbot.branch']
        self.env.cr.execute("""
            WITH t (branch) AS (SELECT unnest(%s))
          SELECT t.branch, b.id
            FROM t LEFT JOIN runbot_branch b ON (b.name = t.branch)
           WHERE b.repo_id = %s;
        """, ([r[0] for r in refs], self.id))
        ref_branches = {r[0]: r[1] for r in self.env.cr.fetchall()}

        for name, sha, date, author, author_email, subject, committer, committer_email in refs:
            if not ref_branches.get(name):
                _logger.debug('repo %s found new branch %s', self.name, name)
                new_branch = Branch.create({'repo_id': self.id, 'name': name})
                ref_branches[name] = new_branch.id
        return ref_branches

    def _find_new_commits(self, refs, ref_branches):
        """Find new commits in bare repo
        :param refs: list of tuples returned by _get_refs()
        :param ref_branches: dict structure {branch.name: branch.id}
                             described in _find_or_create_branches
        """
        self.ensure_one()
        Branch = self.env['runbot.branch']
        Build = self.env['runbot.build']
        icp = self.env['ir.config_parameter']
        max_age = int(icp.get_param('runbot.runbot_max_age', default=30))

        self.env.cr.execute("""
            WITH t (build, branch_id) AS (SELECT unnest(%s), unnest(%s))
          SELECT b.name, b.branch_id
            FROM t LEFT JOIN runbot_build b ON (b.name = t.build) AND (b.branch_id = t.branch_id)
        """, ([r[1] for r in refs], [ref_branches[r[0]] for r in refs]))
        # generate a set of tuples (branch_id, sha)
        builds_candidates = {(r[1], r[0]) for r in self.env.cr.fetchall()}

        for name, sha, date, author, author_email, subject, committer, committer_email in refs:
            branch = Branch.browse(ref_branches[name])

            # skip the build for old branches (could be checked before creating the branch in db?)
            if dateutil.parser.parse(date[:19]) + datetime.timedelta(days=max_age) < datetime.datetime.now():
                continue

            # create build (and mark previous builds as skipped) if not found
            if (branch.id, sha) not in builds_candidates:
                if branch.no_auto_build or branch.no_build:
                    continue
                _logger.debug('repo %s branch %s new build found revno %s', self.name, branch.name, sha)
                build_info = {
                    'branch_id': branch.id,
                    'name': sha,
                    'author': author,
                    'author_email': author_email,
                    'committer': committer,
                    'committer_email': committer_email,
                    'subject': subject,
                    'date': dateutil.parser.parse(date[:19]),
                }
                if not branch.sticky:
                    # pending builds are skipped as we have a new ref
                    builds_to_skip = Build.search(
                        [('branch_id', '=', branch.id), ('local_state', '=', 'pending')],
                        order='sequence asc')
                    builds_to_skip._skip(reason='New ref found')
                    if builds_to_skip:
                        build_info['sequence'] = builds_to_skip[0].sequence
                    # testing builds are killed
                    builds_to_kill = Build.search([
                        ('branch_id', '=', branch.id),
                        ('local_state', '=', 'testing'),
                        ('committer', '=', committer)
                    ])
                    for btk in builds_to_kill:
                        btk._log('repo._update_git', 'Build automatically killed, newer build found.', level='WARNING')
                    builds_to_kill.write({'requested_action': 'deathrow'})

                new_build = Build.create(build_info)
                # create a reverse dependency build if needed
                if branch.sticky:
                    for rev_repo in self.search([('dependency_ids', 'in', self.id)]):
                        # find the latest build with the same branch name
                        latest_rev_build = Build.search([('build_type', '=', 'normal'), ('hidden', '=', False), ('repo_id.id', '=', rev_repo.id), ('branch_id.branch_name', '=', branch.branch_name)], order='id desc', limit=1)
                        if latest_rev_build:
                            _logger.debug('Reverse dependency build %s forced in repo %s by commit %s', latest_rev_build.dest, rev_repo.name, sha[:6])
                            indirect = latest_rev_build._force(message='Rebuild from dependency %s commit %s' % (self.name, sha[:6]))
                            if not indirect:
                                _logger.exception('Failed to create indirect for %s from %s in repo %s', new_build, latest_rev_build, rev_repo)
                            else:
                                indirect.build_type = 'indirect'
                                new_build.revdep_build_ids += indirect

        # skip old builds (if their sequence number is too low, they will not ever be built)
        skippable_domain = [('repo_id', '=', self.id), ('local_state', '=', 'pending')]
        icp = self.env['ir.config_parameter']
        running_max = int(icp.get_param('runbot.runbot_running_max', default=75))
        builds_to_be_skipped = Build.search(skippable_domain, order='sequence desc', offset=running_max)
        builds_to_be_skipped._skip()
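
    # Illustrative note: with the default runbot_running_max of 75, only the
    # 75 most recent pending builds per repo stay queued; anything older is
    # skipped outright, since it would never reach the front of the queue.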

    @api.multi
    def _create_pending_builds(self):
        """ Find new commits in physical repos"""
        refs = {}
        ref_branches = {}
        for repo in self:
            try:
                ref = repo._get_refs()
                max_age = int(self.env['ir.config_parameter'].get_param('runbot.runbot_max_age', default=30))
                good_refs = [r for r in ref if dateutil.parser.parse(r[2][:19]) + datetime.timedelta(days=max_age) > datetime.datetime.now()]
                if good_refs:
                    refs[repo] = good_refs
            except Exception:
                _logger.exception('Failed to get refs for repo %s', repo.name)
            if repo in refs:
                ref_branches[repo] = repo._find_or_create_branches(refs[repo])

        # keep _find_or_create_branches separated from build creation to ease
        # closest branch detection
        for repo in self:
            if repo in refs:
                repo._find_new_commits(refs[repo], ref_branches[repo])

    def _clone(self):
        """ Clone the remote repo if needed """
        self.ensure_one()
        repo = self
        if not os.path.isdir(os.path.join(repo.path, 'refs')):
            _logger.info("Cloning repository '%s' in '%s'" % (repo.name, repo.path))
            subprocess.call(['git', 'clone', '--bare', repo.name, repo.path])

    def _update_git(self, force):
        """ Update the git repo on FS """
        self.ensure_one()
        repo = self
        _logger.debug('repo %s updating branches', repo.name)

        if not os.path.isdir(os.path.join(repo.path)):
            os.makedirs(repo.path)
        self._clone()

        # check for mode == hook
        fname_fetch_head = os.path.join(repo.path, 'FETCH_HEAD')
        if not force and os.path.isfile(fname_fetch_head):
            fetch_time = os.path.getmtime(fname_fetch_head)
            if repo.mode == 'hook' and (not repo.hook_time or repo.hook_time < fetch_time):
                t0 = time.time()
                _logger.debug('repo %s skip hook fetch fetch_time: %ss ago hook_time: %ss ago',
                              repo.name, int(t0 - fetch_time), int(t0 - repo.hook_time) if repo.hook_time else 'never')
                return

        self._update_fetch_cmd()

    def _update_fetch_cmd(self):
        # Extracted from _update_git to be easily overridden in an external module
        self.ensure_one()
        repo = self
        repo._git(['fetch', '-p', 'origin', '+refs/heads/*:refs/heads/*', '+refs/pull/*/head:refs/pull/*'])
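
    # The refspec above mirrors every branch head and every pull request head
    # into the bare clone (with -p pruning refs deleted upstream), which is
    # what later lets _get_refs scan refs/heads and refs/pull together.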

    @api.multi
    def _update(self, force=True):
        """ Update the physical git repositories on FS"""
        for repo in reversed(self):
            try:
                repo._update_git(force)
            except Exception:
                _logger.exception('Failed to update repo %s', repo.name)

    @api.multi
    def _scheduler(self, host=None):
        """Schedule builds for the repository"""
        ids = self.ids
        if not ids:
            return
        icp = self.env['ir.config_parameter']
        host = host or self.env['runbot.host']._get_current()
        workers = host.get_nb_worker()
        running_max = int(icp.get_param('runbot.runbot_running_max', default=75))
        assigned_only = host.assigned_only

        Build = self.env['runbot.build']
        domain = [('repo_id', 'in', ids)]
        domain_host = domain + [('host', '=', host.name)]

        # schedule jobs (transitions testing -> running, kill jobs, ...)
        build_ids = Build.search(domain_host + ['|', ('local_state', 'in', ['testing', 'running']), ('requested_action', 'in', ['wake_up', 'deathrow'])])
        build_ids._schedule()
        self.env.cr.commit()
        self.invalidate_cache()

        # launch new tests

        nb_testing = Build.search_count(domain_host + [('local_state', '=', 'testing')])
        available_slots = workers - nb_testing
        reserved_slots = Build.search_count(domain_host + [('local_state', '=', 'pending')])
        assignable_slots = (available_slots - reserved_slots) if not assigned_only else 0
        if available_slots > 0:
            if assignable_slots > 0:  # note: slots have been adapted to allow forcing a host on a pending build; normally there is no pending build with a host set
                # commit transaction to reduce the critical section duration
                def allocate_builds(where_clause, limit):
                    self.env.cr.commit()
                    self.invalidate_cache()
                    # self-assign to be sure that another runbot instance cannot self-assign the same builds
                    query = """UPDATE
                                    runbot_build
                                SET
                                    host = %%(host)s
                                WHERE
                                    runbot_build.id IN (
                                        SELECT runbot_build.id
                                        FROM runbot_build
                                        LEFT JOIN runbot_branch
                                        ON runbot_branch.id = runbot_build.branch_id
                                        WHERE
                                            runbot_build.repo_id IN %%(repo_ids)s
                                            AND runbot_build.local_state = 'pending'
                                            AND runbot_build.host IS NULL
                                            %s
                                        ORDER BY
                                            array_position(array['normal','rebuild','indirect','scheduled']::varchar[], runbot_build.build_type) ASC,
                                            runbot_branch.sticky DESC,
                                            runbot_branch.priority DESC,
                                            runbot_build.sequence ASC
                                        FOR UPDATE OF runbot_build SKIP LOCKED
                                        LIMIT %%(limit)s
                                    )
                                    RETURNING id""" % where_clause
                    self.env.cr.execute(query, {'repo_ids': tuple(ids), 'host': host.name, 'limit': limit})
                    return self.env.cr.fetchall()

                allocated = allocate_builds("""AND runbot_build.build_type != 'scheduled'""", assignable_slots)
                if allocated:
                    _logger.debug('Normal builds %s were allocated to runbot' % allocated)
                weak_slot = assignable_slots - len(allocated) - 1
                if weak_slot > 0:
                    allocated = allocate_builds('', weak_slot)
                    if allocated:
                        _logger.debug('Scheduled builds %s were allocated to runbot' % allocated)

            pending_build = Build.search(domain_host + [('local_state', '=', 'pending')], limit=available_slots)
            if pending_build:
                pending_build._schedule()

        # terminate and reap doomed builds
        build_ids = Build.search(domain_host + [('local_state', '=', 'running')], order='job_start desc').ids
        # sort builds: the last build of each sticky branch then the rest
        sticky = {}
        non_sticky = []
        for build in Build.browse(build_ids):
            if build.branch_id.sticky and build.branch_id.id not in sticky:
                sticky[build.branch_id.id] = build.id
            else:
                non_sticky.append(build.id)
        build_ids = list(sticky.values())
        build_ids += non_sticky
        # terminate extra running builds
        Build.browse(build_ids)[running_max:]._kill()
        Build.browse(build_ids)._reap()
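
    # Worked example (illustrative numbers): with workers == 6, 2 builds
    # already testing and 1 pending build reserved for this host,
    # available_slots is 4 and assignable_slots is 3, so the UPDATE above
    # claims at most 3 unassigned pending builds. FOR UPDATE ... SKIP LOCKED
    # lets concurrent runbot hosts run the same query without blocking on or
    # double-claiming each other's rows.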

    def _domain(self):
        return self.env.get('ir.config_parameter').get_param('runbot.runbot_domain', fqdn())

    def _reload_nginx(self):
        settings = {}
        settings['port'] = config.get('http_port')
        settings['runbot_static'] = os.path.join(get_module_resource('runbot', 'static'), '')
        nginx_dir = os.path.join(self._root(), 'nginx')
        settings['nginx_dir'] = nginx_dir
        settings['re_escape'] = re.escape
        settings['fqdn'] = fqdn()
        nginx_repos = self.search([('nginx', '=', True)], order='id')
        if nginx_repos:
            settings['builds'] = self.env['runbot.build'].search([('repo_id', 'in', nginx_repos.ids), ('local_state', '=', 'running'), ('host', '=', fqdn())])

            nginx_config = self.env['ir.ui.view'].render_template("runbot.nginx_config", settings)
            os.makedirs(nginx_dir, exist_ok=True)
            content = None
            nginx_conf_path = os.path.join(nginx_dir, 'nginx.conf')
            if os.path.isfile(nginx_conf_path):  # the file does not exist yet on the first run
                with open(nginx_conf_path, 'rb') as f:
                    content = f.read()
            if content != nginx_config:
                _logger.debug('reload nginx')
                with open(nginx_conf_path, 'wb') as f:
                    f.write(nginx_config)
                try:
                    pid = int(open(os.path.join(nginx_dir, 'nginx.pid')).read().strip(' \n'))
                    os.kill(pid, signal.SIGHUP)
                except Exception:
                    _logger.debug('start nginx')
                    if subprocess.call(['/usr/sbin/nginx', '-p', nginx_dir, '-c', 'nginx.conf']):
                        # obscure nginx bug leaving orphan worker listening on nginx port
                        if not subprocess.call(['pkill', '-f', '-P1', 'nginx: worker']):
                            _logger.debug('failed to start nginx - orphan worker killed, retrying')
                            subprocess.call(['/usr/sbin/nginx', '-p', nginx_dir, '-c', 'nginx.conf'])
                        else:
                            _logger.debug('failed to start nginx - failed to kill orphan worker - oh well')

    def _get_cron_period(self, min_margin=120):
        """ Compute a randomized cron period with a 2 min margin below
        real cron timeout from config.
        """
        cron_limit = config.get('limit_time_real_cron')
        req_limit = config.get('limit_time_real')
        cron_timeout = cron_limit if cron_limit > -1 else req_limit
        return cron_timeout - (min_margin + random.randint(1, 60))
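
    # Worked example (illustrative): with limit_time_real_cron = 600 and the
    # default min_margin of 120, the period is 600 - (120 + randint(1, 60)),
    # i.e. between 420 and 479 seconds, so the loop always ends comfortably
    # before the server would kill the cron worker.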

    def _cron_fetch_and_schedule(self, hostname):
        """This method has to be called from a dedicated cron on a runbot
        in charge of orchestration.
        """
        if hostname != fqdn():
            return 'Not for me'
        start_time = time.time()
        timeout = self._get_cron_period()
        icp = self.env['ir.config_parameter']
        update_frequency = int(icp.get_param('runbot.runbot_update_frequency', default=10))
        while time.time() - start_time < timeout:
            repos = self.search([('mode', '!=', 'disabled')])
            repos._update(force=False)
            repos._create_pending_builds()

            self.env.cr.commit()
            self.invalidate_cache()
            time.sleep(update_frequency)

    def _cron_fetch_and_build(self, hostname):
        """ This method has to be called from a dedicated cron
        created on each runbot instance.
        """
        if hostname != fqdn():
            return 'Not for me'
        host = self.env['runbot.host']._get_current()
        host.set_psql_conn_count()
        host.last_start_loop = fields.Datetime.now()
        self.env.cr.commit()
        start_time = time.time()
        # 1. source cleanup
        # -> remove sources when no build is using them
        #    (it could be useful to keep them for wake up, but they can be
        #    checked out again as long as the branch was not force-pushed)
        self.env['runbot.repo']._source_cleanup()
        # 2. db and log cleanup
        # -> keep them as long as possible
        self.env['runbot.build']._local_cleanup()

        timeout = self._get_cron_period()
        icp = self.env['ir.config_parameter']
        update_frequency = int(icp.get_param('runbot.runbot_update_frequency', default=10))
        while time.time() - start_time < timeout:
            repos = self.search([('mode', '!=', 'disabled')])
            try:
                repos._scheduler(host)
                host.last_success = fields.Datetime.now()
                self.env.cr.commit()
                self.env.reset()
                self = self.env()[self._name]
                self._reload_nginx()
                time.sleep(update_frequency)
            except TransactionRollbackError:
                _logger.exception('Trying to rollback')
                self.env.cr.rollback()
                self.env.reset()
                time.sleep(random.uniform(0, 1))
            except Exception as e:
                with registry(self._cr.dbname).cursor() as cr:  # use another cursor since the transaction will be rolled back
                    message = str(e)
                    chost = host.with_env(self.env(cr=cr))
                    if chost.last_exception == message:
                        chost.exception_count += 1
                    else:
                        chost.last_exception = message
                        chost.exception_count = 1
                raise

        if host.last_exception:
            host.last_exception = ""
            host.exception_count = 0
        host.last_end_loop = fields.Datetime.now()
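
    # The separate cursor in the exception handler above is the usual pattern
    # for persisting monitoring data from a failing transaction: the main
    # cursor is about to be rolled back by the re-raise, so the exception
    # details are written (and committed) through an independent connection
    # first.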

    def _source_cleanup(self):
        try:
            if self.pool._init:
                return
            _logger.info('Source cleaning')
            # a source can only be removed if no build is using it as its
            # name or in its dependency_ids (i.e. as a commit)
            cannot_be_deleted_builds = self.env['runbot.build'].search([('host', '=', fqdn()), ('local_state', 'not in', ('done', 'duplicate'))])
            cannot_be_deleted_path = set()
            for build in cannot_be_deleted_builds:
                for commit in build._get_all_commit():
                    cannot_be_deleted_path.add(commit._source_path())

            to_delete = set()
            to_keep = set()
            repos = self.search([('mode', '!=', 'disabled')])
            for repo in repos:
                repo_source = os.path.join(repo._root(), 'sources', repo._get_repo_name_part(), '*')
                for source_dir in glob.glob(repo_source):
                    if source_dir not in cannot_be_deleted_path:
                        to_delete.add(source_dir)
                    else:
                        to_keep.add(source_dir)

            # we compare cannot_be_deleted_path with to_keep to ensure the
            # algorithm is working as expected; we want to avoid erasing files by mistake
            # note: a parent_build may be in testing without having checked out
            # its sources, but that should be exceptional
            if to_delete:
                if cannot_be_deleted_path == to_keep:
                    to_delete = list(to_delete)
                    to_keep = list(to_keep)
                    cannot_be_deleted_path = list(cannot_be_deleted_path)
                    for source_dir in to_delete:
                        _logger.info('Deleting source: %s' % source_dir)
                        assert 'static' in source_dir
                        shutil.rmtree(source_dir)
                    _logger.info('%s/%s source folders were deleted (%s kept)' % (len(to_delete), len(to_delete + to_keep), len(to_keep)))
                else:
                    _logger.warning('Inconsistency between sources and database: \n%s \n%s' % (cannot_be_deleted_path - to_keep, to_keep - cannot_be_deleted_path))

        except Exception:
            _logger.exception('An exception occurred while cleaning sources')