# -*- coding: utf-8 -*-
import datetime
import dateutil
import glob
import json
import logging
import random
import re
import requests
import shutil
import signal
import subprocess
import time

from odoo.exceptions import UserError, ValidationError
from odoo.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
from odoo import models, fields, api, registry
from odoo.modules.module import get_module_resource
from odoo.tools import config
from odoo.osv import expression
from ..common import fqdn, dt2time, Commit, dest_reg, os
from ..container import docker_ps, docker_stop
from psycopg2.extensions import TransactionRollbackError

_logger = logging.getLogger(__name__)


class RunbotException(Exception):
    pass


class runbot_repo(models.Model):

    _name = "runbot.repo"
    _description = "Repo"
    _order = 'sequence, id'

    name = fields.Char('Repository', required=True)
    short_name = fields.Char('Short name', compute='_compute_short_name', store=False, readonly=True)
    sequence = fields.Integer('Sequence')
    path = fields.Char(compute='_get_path', string='Directory', readonly=True)
    base = fields.Char(compute='_get_base_url', string='Base URL', readonly=True)  # could be renamed to a more explicit name like base_url
    nginx = fields.Boolean('Nginx')
    mode = fields.Selection([('disabled', 'Disabled'),
                             ('poll', 'Poll'),
                             ('hook', 'Hook')],
                            default='poll',
                            string="Mode", required=True, help="hook: Wait for webhook on /runbot/hook/<id> i.e. github push event")
    hook_time = fields.Float('Last hook time', compute='_compute_hook_time')
    get_ref_time = fields.Float('Last refs db update', compute='_compute_get_ref_time')
    duplicate_id = fields.Many2one('runbot.repo', 'Duplicate repo', help='Repository for finding duplicate builds')
    modules = fields.Char("Modules to install", help="Comma-separated list of modules to install and test.")
    modules_auto = fields.Selection([('none', 'None (only explicit modules list)'),
                                     ('repo', 'Repository modules (excluding dependencies)'),
                                     ('all', 'All modules (including dependencies)')],
                                    default='all',
                                    string="Other modules to install automatically")

    dependency_ids = fields.Many2many(
        'runbot.repo', 'runbot_repo_dep_rel', column1='dependant_id', column2='dependency_id',
        string='Extra dependencies',
        help="Community addon repos which need to be present to run tests.")
    token = fields.Char("Github token", groups="runbot.group_runbot_admin")
    group_ids = fields.Many2many('res.groups', string='Limited to groups')

    repo_config_id = fields.Many2one('runbot.build.config', 'Repo Config')
    config_id = fields.Many2one('runbot.build.config', 'Run Config', compute='_compute_config_id', inverse='_inverse_config_id')

    server_files = fields.Char('Server files', help='Comma-separated list of possible server files')  # odoo-bin,openerp-server,openerp-server.py
    manifest_files = fields.Char('Manifest files', help='Comma-separated list of possible manifest files', default='__manifest__.py')
    addons_paths = fields.Char('Addons paths', help='Comma-separated list of possible addons paths', default='')
    no_build = fields.Boolean("No build", help="Forbid creation of builds on this repo", default=False)

    def _compute_config_id(self):
        for repo in self:
            if repo.repo_config_id:
                repo.config_id = repo.repo_config_id
            else:
                repo.config_id = self.env.ref('runbot.runbot_build_config_default')

    def _inverse_config_id(self):
        for repo in self:
            repo.repo_config_id = repo.config_id

    def _compute_get_ref_time(self):
        self.env.cr.execute("""
            SELECT repo_id, time FROM runbot_repo_reftime
            WHERE id IN (
                SELECT max(id) FROM runbot_repo_reftime
                WHERE repo_id = any(%s) GROUP BY repo_id
            )
        """, [self.ids])
        times = dict(self.env.cr.fetchall())
        for repo in self:
            repo.get_ref_time = times.get(repo.id, 0)

    def _compute_hook_time(self):
        self.env.cr.execute("""
            SELECT repo_id, time FROM runbot_repo_hooktime
            WHERE id IN (
                SELECT max(id) FROM runbot_repo_hooktime
                WHERE repo_id = any(%s) GROUP BY repo_id
            )
        """, [self.ids])
        times = dict(self.env.cr.fetchall())
        for repo in self:
            repo.hook_time = times.get(repo.id, 0)
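
    # Note: ref and hook times are stored as append-only rows in dedicated
    # tables (see RefTime and HookTime at the end of this file) instead of
    # plain fields on the repo; the computes above just read the latest row
    # per repo, and _gc_times() below trims the accumulated history.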

    def set_hook_time(self, value):
        for repo in self:
            self.env['runbot.repo.hooktime'].create({'time': value, 'repo_id': repo.id})
        self.invalidate_cache()

    def set_ref_time(self, value):
        for repo in self:
            self.env['runbot.repo.reftime'].create({'time': value, 'repo_id': repo.id})
        self.invalidate_cache()

    def _gc_times(self):
        self.env.cr.execute("""
            DELETE from runbot_repo_reftime WHERE id NOT IN (
                SELECT max(id) FROM runbot_repo_reftime GROUP BY repo_id
            )
        """)
        self.env.cr.execute("""
            DELETE from runbot_repo_hooktime WHERE id NOT IN (
                SELECT max(id) FROM runbot_repo_hooktime GROUP BY repo_id
            )
        """)

    def _root(self):
        """Return root directory of repository"""
        default = os.path.join(os.path.dirname(__file__), '../static')
        return os.path.abspath(default)

    def _source_path(self, sha, *path):
        """Return the absolute path to the source folder of the repo (appending optional *path)."""
        self.ensure_one()
        return os.path.join(self._root(), 'sources', self._get_repo_name_part(), sha, *path)

    @api.depends('name')
    def _get_path(self):
        """Compute the server path of the repo from its name."""
        root = self._root()
        for repo in self:
            repo.path = os.path.join(root, 'repo', repo._sanitized_name(repo.name))

    @api.model
    def _sanitized_name(self, name):
        for i in '@:/':
            name = name.replace(i, '_')
        return name

    @api.depends('name')
    def _get_base_url(self):
        for repo in self:
            name = re.sub('.+@', '', repo.name)
            name = re.sub('^https://', '', name)  # support https repo style
            name = re.sub('.git$', '', name)
            name = name.replace(':', '/')
            repo.base = name

    @api.depends('name', 'base')
    def _compute_short_name(self):
        for repo in self:
            repo.short_name = '/'.join(repo.base.split('/')[-2:])

    def _get_repo_name_part(self):
        self.ensure_one()
        return self._sanitized_name(self.name.split('/')[-1])

    def _git(self, cmd):
        """Execute a git command 'cmd'"""
        self.ensure_one()
        _logger.debug("git command: git (dir %s) %s", self.short_name, ' '.join(cmd))
        cmd = ['git', '--git-dir=%s' % self.path] + cmd
        return subprocess.check_output(cmd).decode('utf-8')

    def _git_rev_parse(self, branch_name):
        return self._git(['rev-parse', branch_name]).strip()
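
    # Note: sources are exported once per commit into a shared location (see
    # _source_path) so docker can mount them read-only into each build
    # directory instead of checking out one copy per build; a consequence is
    # that a build cannot write in its own sources.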

    def _git_export(self, sha):
        """Export the commit `sha` of this repo into the shared sources directory."""
        # TODO add automated tests
        self.ensure_one()
        export_path = self._source_path(sha)

        if os.path.isdir(export_path):
            _logger.info('git export: checking out to %s (already exists)' % export_path)
            return export_path

        if not self._hash_exists(sha):
            self._update(force=True)
            if not self._hash_exists(sha):
                try:
                    self._git(['fetch', 'origin', sha])
                except Exception:
                    pass
                if not self._hash_exists(sha):
                    raise RunbotException("Commit %s is unreachable. Did you force push the branch since build creation?" % sha)

        _logger.info('git export: checking out to %s (new)' % export_path)
        os.makedirs(export_path)

        p1 = subprocess.Popen(['git', '--git-dir=%s' % self.path, 'archive', sha], stdout=subprocess.PIPE)
        p2 = subprocess.Popen(['tar', '-xmC', export_path], stdin=p1.stdout, stdout=subprocess.PIPE)
        p1.stdout.close()  # Allow p1 to receive a SIGPIPE if p2 exits.
        (out, err) = p2.communicate()
        if err:
            raise RunbotException("Archive %s failed. Did you force push the branch since build creation? (%s)" % (sha, err))

        # migration scripts link if necessary
        icp = self.env['ir.config_parameter']
        ln_param = icp.get_param('runbot_migration_ln', default='')
        migration_repo_id = int(icp.get_param('runbot_migration_repo_id', default=0))
        if ln_param and migration_repo_id and self.server_files:
            scripts_dir = self.env['runbot.repo'].browse(migration_repo_id)._get_repo_name_part()
            try:
                os.symlink('/data/build/%s' % scripts_dir, self._source_path(sha, ln_param))
            except FileNotFoundError:
                _logger.warning('Impossible to create migration symlink')

        # TODO get result and fall back on cleaning in case of problem
        return export_path
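
    # Note: `git cat-file -e <sha>` exits with a non-zero status when the
    # object is missing; _git() surfaces that as subprocess.CalledProcessError,
    # which _hash_exists() below converts into a boolean.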

    def _hash_exists(self, commit_hash):
        """Verify that a commit hash exists in the repo"""
        self.ensure_one()
        try:
            self._git(['cat-file', '-e', commit_hash])
        except subprocess.CalledProcessError:
            return False
        return True

    def _github(self, url, payload=None, ignore_errors=False, nb_tries=2):
        """Send an HTTP request to the github API and return the decoded JSON response."""
        for repo in self:
            if not repo.token:
                return
            match_object = re.search('([^/]+)/([^/]+)/([^/.]+(.git)?)', repo.base)
            if match_object:
                url = url.replace(':owner', match_object.group(2))
                url = url.replace(':repo', match_object.group(3))
                url = 'https://api.%s%s' % (match_object.group(1), url)
                session = requests.Session()
                session.auth = (repo.token, 'x-oauth-basic')
                session.headers.update({'Accept': 'application/vnd.github.she-hulk-preview+json'})
                try_count = 0
                while try_count < nb_tries:
                    try:
                        if payload:
                            response = session.post(url, data=json.dumps(payload))
                        else:
                            response = session.get(url)
                        response.raise_for_status()
                        if try_count > 0:
                            _logger.info('Success after %s tries' % (try_count + 1))
                        return response.json()
                    except Exception:
                        try_count += 1
                        if try_count < nb_tries:
                            time.sleep(2)
                        else:
                            if ignore_errors:
                                _logger.exception('Ignored github error %s %r (try %s/%s)' % (url, payload, try_count, nb_tries))
                            else:
                                raise
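
    # Illustrative usage (hypothetical endpoint): with repo.base set to
    # 'github.com/odoo/odoo', calling
    #   repo._github('/repos/:owner/:repo/statuses/%s' % sha, {'state': 'success'})
    # would POST to https://api.github.com/repos/odoo/odoo/statuses/<sha>.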

    def _get_fetch_head_time(self):
        self.ensure_one()
        fname_fetch_head = os.path.join(self.path, 'FETCH_HEAD')
        if os.path.exists(fname_fetch_head):
            return os.path.getmtime(fname_fetch_head)

    def _get_refs(self):
        """Find new refs
        :return: list of tuples with the following ref information:
                 name, sha, date, author, author_email, subject, committer, committer_email
        """
        self.ensure_one()
        get_ref_time = round(self._get_fetch_head_time(), 4)
        if not self.get_ref_time or get_ref_time > self.get_ref_time:
            self.set_ref_time(get_ref_time)
            fields = ['refname', 'objectname', 'committerdate:iso8601', 'authorname', 'authoremail', 'subject', 'committername', 'committeremail']
            fmt = "%00".join(["%(" + field + ")" for field in fields])
            git_refs = self._git(['for-each-ref', '--format', fmt, '--sort=-committerdate', 'refs/heads', 'refs/pull'])
            git_refs = git_refs.strip()
            return [tuple(field for field in line.split('\x00')) for line in git_refs.split('\n')]
        else:
            return []
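
    # Note: the --format string above joins the fields with %00 (a NUL byte),
    # which cannot appear in ref names or commit subjects, so splitting each
    # output line on '\x00' reliably recovers the eight fields listed in
    # `fields`.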

    def _find_or_create_branches(self, refs):
        """Parse refs and create the branches that do not exist yet
        :param refs: list of tuples returned by _get_refs()
        :return: dict {branch.name: branch.id}
        The returned structure contains all the branches from refs, whether
        newly created or pre-existing.
        """
        Branch = self.env['runbot.branch']
        self.env.cr.execute("""
            WITH t (branch) AS (SELECT unnest(%s))
            SELECT t.branch, b.id
            FROM t LEFT JOIN runbot_branch b ON (b.name = t.branch)
            WHERE b.repo_id = %s;
        """, ([r[0] for r in refs], self.id))
        ref_branches = {r[0]: r[1] for r in self.env.cr.fetchall()}

        for name, sha, date, author, author_email, subject, committer, committer_email in refs:
            if not ref_branches.get(name):
                _logger.debug('repo %s found new branch %s', self.name, name)
                new_branch = Branch.create({'repo_id': self.id, 'name': name})
                ref_branches[name] = new_branch.id
        return ref_branches
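
    # Note: in _find_new_commits() below, the SELECT DISTINCT ON (branch_id)
    # query keeps only the newest top-level 'normal' build per branch (ORDER
    # BY branch_id, id DESC), so a ref only triggers a new build when its head
    # sha differs from that last build.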

    def _find_new_commits(self, refs, ref_branches):
        """Find new commits in bare repo
        :param refs: list of tuples returned by _get_refs()
        :param ref_branches: dict structure {branch.name: branch.id}
                             described in _find_or_create_branches
        """
        self.ensure_one()
        Branch = self.env['runbot.branch']
        Build = self.env['runbot.build']
        icp = self.env['ir.config_parameter']
        max_age = int(icp.get_param('runbot.runbot_max_age', default=30))

        self.env.cr.execute("""
            SELECT DISTINCT ON (branch_id) name, branch_id
            FROM runbot_build WHERE branch_id in %s AND build_type = 'normal' AND parent_id is null ORDER BY branch_id, id DESC;
        """, (tuple([ref_branches[r[0]] for r in refs]),))
        # generate a set of tuples (branch_id, sha)
        builds_candidates = {(r[1], r[0]) for r in self.env.cr.fetchall()}

        for name, sha, date, author, author_email, subject, committer, committer_email in refs:
            branch = Branch.browse(ref_branches[name])

            # skip the build for old branches (could be checked before creating the branch in db?)
            if dateutil.parser.parse(date[:19]) + datetime.timedelta(days=max_age) < datetime.datetime.now():
                continue

            # create build (and mark previous builds as skipped) if not found
            if (branch.id, sha) not in builds_candidates:
                if branch.no_auto_build or branch.no_build or (branch.repo_id.no_build and not branch.rebuild_requested):
                    continue
                if branch.rebuild_requested:
                    branch.rebuild_requested = False
                _logger.debug('repo %s branch %s new build found revno %s', self.name, branch.name, sha)
                build_info = {
                    'branch_id': branch.id,
                    'name': sha,
                    'author': author,
                    'author_email': author_email,
                    'committer': committer,
                    'committer_email': committer_email,
                    'subject': subject,
                    'date': dateutil.parser.parse(date[:19]),
                    'build_type': 'normal',
                }
                if not branch.sticky:
                    # pending builds are skipped as we have a new ref
                    builds_to_skip = Build.search(
                        [('branch_id', '=', branch.id), ('local_state', '=', 'pending')],
                        order='sequence asc')
                    builds_to_skip._skip(reason='New ref found')
                    if builds_to_skip:
                        build_info['sequence'] = builds_to_skip[0].sequence

                Build.create(build_info)

    def _create_pending_builds(self):
        """Find new commits in physical repos"""
        refs = {}
        ref_branches = {}
        for repo in self:
            try:
                ref = repo._get_refs()
                max_age = int(self.env['ir.config_parameter'].get_param('runbot.runbot_max_age', default=30))
                good_refs = [r for r in ref if dateutil.parser.parse(r[2][:19]) + datetime.timedelta(days=max_age) > datetime.datetime.now()]
                if good_refs:
                    refs[repo] = good_refs
            except Exception:
                _logger.exception('Failed to get refs for repo %s', repo.name)
            if repo in refs:
                ref_branches[repo] = repo._find_or_create_branches(refs[repo])

        # keep _find_or_create_branches separated from build creation to ease
        # closest branch detection
        for repo in self:
            if repo in refs:
                repo._find_new_commits(refs[repo], ref_branches[repo])

    def _clone(self):
        """Clone the remote repo if needed"""
        self.ensure_one()
        repo = self
        if not os.path.isdir(os.path.join(repo.path, 'refs')):
            _logger.info("Cloning repository '%s' in '%s'" % (repo.name, repo.path))
            subprocess.call(['git', 'clone', '--bare', repo.name, repo.path])

    def _update_git(self, force):
        """Update the git repo on FS"""
        self.ensure_one()
        repo = self
        _logger.debug('repo %s updating branches', repo.name)

        if not os.path.isdir(os.path.join(repo.path)):
            os.makedirs(repo.path)
            self._clone()

        # check for mode == hook
        fname_fetch_head = os.path.join(repo.path, 'FETCH_HEAD')
        if not force and os.path.isfile(fname_fetch_head):
            fetch_time = os.path.getmtime(fname_fetch_head)
            if repo.mode == 'hook' and (not repo.hook_time or repo.hook_time < fetch_time):
                t0 = time.time()
                _logger.debug('repo %s skip hook fetch fetch_time: %ss ago hook_time: %ss ago',
                              repo.name, int(t0 - fetch_time), int(t0 - repo.hook_time) if repo.hook_time else 'never')
                return

        self._update_fetch_cmd()

    def _update_fetch_cmd(self):
        # Extracted from update_git to be easily overridden in an external module
        self.ensure_one()
        repo = self
        try:
            repo._git(['fetch', '-p', 'origin', '+refs/heads/*:refs/heads/*', '+refs/pull/*/head:refs/pull/*'])
        except subprocess.CalledProcessError as e:
            message = 'Failed to fetch repo %s with return code %s. Original command was %s' % (repo.name, e.returncode, e.cmd)
            _logger.exception(message)
            host = self.env['runbot.host'].search([('name', '=', fqdn())])
            host.disable()

    def _update(self, force=True):
        """Update the physical git repositories on FS"""
        for repo in reversed(self):
            try:
                repo._update_git(force)  # TODO xdo, check gc log and log warning
            except Exception:
                _logger.exception('Failed to update repo %s', repo.name)

    def _commit(self):
        self.env.cr.commit()
        self.env.cache.invalidate()
        self.env.clear()
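
    # Note: _scheduler() below deliberately calls _commit() after each unit of
    # work: every step is independent, committing eagerly makes the new state
    # visible to the other hosts, and a crash in one step does not roll back
    # the steps that already succeeded.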

    def _scheduler(self, host):
        nb_workers = host.get_nb_worker()

        self._gc_testing(host)
        self._commit()
        for build in self._get_builds_with_requested_actions(host):
            build._process_requested_actions()
            self._commit()
        for build in self._get_builds_to_schedule(host):
            build._schedule()
            self._commit()
        self._assign_pending_builds(host, nb_workers, [('build_type', '!=', 'scheduled')])
        self._commit()
        self._assign_pending_builds(host, nb_workers - 1 or nb_workers)
        self._commit()
        for build in self._get_builds_to_init(host):
            build._init_pendings(host)
            self._commit()
        self._gc_running(host)
        self._commit()
        self._reload_nginx()

    def build_domain_host(self, host, domain=None):
        domain = domain or []
        return [('repo_id', 'in', self.ids), ('host', '=', host.name)] + domain

    def _get_builds_with_requested_actions(self, host):
        return self.env['runbot.build'].search(self.build_domain_host(host, [('requested_action', 'in', ['wake_up', 'deathrow'])]))

    def _get_builds_to_schedule(self, host):
        return self.env['runbot.build'].search(self.build_domain_host(host, [('local_state', 'in', ['testing', 'running'])]))

    def _assign_pending_builds(self, host, nb_workers, domain=None):
        if not self.ids or host.assigned_only or nb_workers <= 0:
            return
        domain_host = self.build_domain_host(host)
        reserved_slots = self.env['runbot.build'].search_count(domain_host + [('local_state', 'in', ('testing', 'pending'))])
        assignable_slots = (nb_workers - reserved_slots)
        if assignable_slots > 0:
            allocated = self._allocate_builds(host, assignable_slots, domain)
            if allocated:
                _logger.debug('Builds %s were allocated to runbot' % allocated)

    def _get_builds_to_init(self, host):
        domain_host = self.build_domain_host(host)
        used_slots = self.env['runbot.build'].search_count(domain_host + [('local_state', '=', 'testing')])
        available_slots = host.get_nb_worker() - used_slots
        if available_slots <= 0:
            return self.env['runbot.build']
        return self.env['runbot.build'].search(domain_host + [('local_state', '=', 'pending')], limit=available_slots)

    def _gc_running(self, host):
        running_max = host.get_running_max()
        # terminate and reap doomed builds
        domain_host = self.build_domain_host(host)
        Build = self.env['runbot.build']
        # some builds are marked as keep running
        cannot_be_killed_ids = Build.search(domain_host + [('keep_running', '=', True)]).ids
        # we want to keep one build running per sticky, no matter which host
        sticky_branches_ids = self.env['runbot.branch'].search([('sticky', '=', True)]).ids
        # search builds on host on sticky branches, ordered by position in branch history
        if sticky_branches_ids:
            self.env.cr.execute("""
                SELECT
                    id
                FROM (
                    SELECT
                        bu.id AS id,
                        bu.host as host,
                        row_number() OVER (PARTITION BY branch_id order by bu.id desc) AS row
                    FROM
                        runbot_branch br INNER JOIN runbot_build bu ON br.id=bu.branch_id
                    WHERE
                        br.id in %s AND (bu.hidden = 'f' OR bu.hidden IS NULL)
                ) AS br_bu
                WHERE
                    row <= 4 AND host = %s
                ORDER BY row, id desc
                """, [tuple(sticky_branches_ids), host.name]
            )
            cannot_be_killed_ids += [r[0] for r in self.env.cr.fetchall()]
        cannot_be_killed_ids = cannot_be_killed_ids[:running_max]  # ensure that we don't try to keep more than we can handle

        build_ids = Build.search(domain_host + [('local_state', '=', 'running'), ('id', 'not in', cannot_be_killed_ids)], order='job_start desc').ids
        Build.browse(build_ids)[running_max:]._kill()

    def _gc_testing(self, host):
        """Garbage collect testing builds that could be killed to make room."""
        # decide if we need room
        Build = self.env['runbot.build']
        domain_host = self.build_domain_host(host)
        testing_builds = Build.search(domain_host + [('local_state', 'in', ['testing', 'pending']), ('requested_action', '!=', 'deathrow')])
        used_slots = len(testing_builds)
        available_slots = host.get_nb_worker() - used_slots
        nb_pending = Build.search_count([('local_state', '=', 'pending'), ('host', '=', False)])
        if available_slots > 0 or nb_pending == 0:
            return
        for build in testing_builds:
            top_parent = build._get_top_parent()
            if not build.branch_id.sticky:
                newer_candidates = Build.search([
                    ('id', '>', build.id),
                    ('branch_id', '=', build.branch_id.id),
                    ('build_type', '=', 'normal'),
                    ('parent_id', '=', False),
                    ('hidden', '=', False),
                    ('config_id', '=', top_parent.config_id.id)
                ])
                if newer_candidates:
                    top_parent._ask_kill(message='Build automatically killed, newer build found %s.' % newer_candidates.ids)
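
    # Note: _allocate_builds() below self-assigns pending builds with a single
    # UPDATE over a SELECT ... FOR UPDATE OF runbot_build SKIP LOCKED, so
    # concurrent hosts racing over the same pending queue each lock a disjoint
    # set of rows instead of blocking or double-assigning builds.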

    def _allocate_builds(self, host, nb_slots, domain=None):
        if nb_slots <= 0:
            return []
        non_allocated_domain = [('repo_id', 'in', self.ids), ('local_state', '=', 'pending'), ('host', '=', False)]
        if domain:
            non_allocated_domain = expression.AND([non_allocated_domain, domain])
        e = expression.expression(non_allocated_domain, self.env['runbot.build'])
        assert e.get_tables() == ['"runbot_build"']
        where_clause, where_params = e.to_sql()

        # self-assign to be sure that another runbot instance cannot self-assign the same builds
        query = """UPDATE
                        runbot_build
                    SET
                        host = %%s
                    WHERE
                        runbot_build.id IN (
                            SELECT runbot_build.id
                            FROM runbot_build
                            LEFT JOIN runbot_branch
                            ON runbot_branch.id = runbot_build.branch_id
                            WHERE
                                %s
                            ORDER BY
                                array_position(array['normal','rebuild','indirect','scheduled']::varchar[], runbot_build.build_type) ASC,
                                runbot_branch.sticky DESC,
                                runbot_branch.priority DESC,
                                runbot_build.sequence ASC
                            FOR UPDATE OF runbot_build SKIP LOCKED
                            LIMIT %%s
                        )
                    RETURNING id""" % where_clause
        self.env.cr.execute(query, [host.name] + where_params + [nb_slots])
        return self.env.cr.fetchall()

    def _domain(self):
        return self.env.get('ir.config_parameter').get_param('runbot.runbot_domain', fqdn())
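
    # Note: _reload_nginx() below rewrites nginx.conf only when the rendered
    # template differs from the file on disk, then signals the running master
    # process with SIGHUP; if no master answers it starts nginx, working
    # around an orphan-worker case by killing leftover workers and retrying.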

    def _reload_nginx(self):
        settings = {}
        settings['port'] = config.get('http_port')
        settings['runbot_static'] = os.path.join(get_module_resource('runbot', 'static'), '')
        nginx_dir = os.path.join(self._root(), 'nginx')
        settings['nginx_dir'] = nginx_dir
        settings['re_escape'] = re.escape
        settings['fqdn'] = fqdn()
        nginx_repos = self.search([('nginx', '=', True)], order='id')
        if nginx_repos:
            settings['builds'] = self.env['runbot.build'].search([('repo_id', 'in', nginx_repos.ids), ('local_state', '=', 'running'), ('host', '=', fqdn())])

            nginx_config = self.env['ir.ui.view'].render_template("runbot.nginx_config", settings)
            os.makedirs(nginx_dir, exist_ok=True)
            nginx_conf_path = os.path.join(nginx_dir, 'nginx.conf')
            content = ''
            if os.path.isfile(nginx_conf_path):
                with open(nginx_conf_path, 'rb') as f:
                    content = f.read()
            if content != nginx_config:
                _logger.debug('reload nginx')
                with open(nginx_conf_path, 'wb') as f:
                    f.write(nginx_config)
                try:
                    pid = int(open(os.path.join(nginx_dir, 'nginx.pid')).read().strip(' \n'))
                    os.kill(pid, signal.SIGHUP)
                except Exception:
                    _logger.debug('start nginx')
                    if subprocess.call(['/usr/sbin/nginx', '-p', nginx_dir, '-c', 'nginx.conf']):
                        # obscure nginx bug leaving orphan worker listening on nginx port
                        if not subprocess.call(['pkill', '-f', '-P1', 'nginx: worker']):
                            _logger.debug('failed to start nginx - orphan worker killed, retrying')
                            subprocess.call(['/usr/sbin/nginx', '-p', nginx_dir, '-c', 'nginx.conf'])
                        else:
                            _logger.debug('failed to start nginx - failed to kill orphan worker - oh well')

    def _get_cron_period(self, min_margin=120):
        """ Compute a randomized cron period with a 2 min margin below
        real cron timeout from config.
        """
        cron_limit = config.get('limit_time_real_cron')
        req_limit = config.get('limit_time_real')
        cron_timeout = cron_limit if cron_limit > -1 else req_limit
        return cron_timeout - (min_margin + random.randint(1, 60))
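
    # Example: with limit_time_real_cron = 3600, the returned budget falls
    # between 3600 - (120 + 60) = 3420s and 3600 - (120 + 1) = 3479s; the
    # randomized part helps avoid all cron workers stopping at the exact same
    # moment.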

    def _cron_fetch_and_schedule(self, hostname):
        """This method has to be called from a dedicated cron on a runbot
        in charge of orchestration.
        """
        if hostname != fqdn():
            return 'Not for me'

        start_time = time.time()
        timeout = self._get_cron_period()
        icp = self.env['ir.config_parameter']
        update_frequency = int(icp.get_param('runbot.runbot_update_frequency', default=10))
        while time.time() - start_time < timeout:
            repos = self.search([('mode', '!=', 'disabled')])
            repos._update(force=False)
            repos._create_pending_builds()
            self._commit()
            time.sleep(update_frequency)

    def _cron_fetch_and_build(self, hostname):
        """ This method has to be called from a dedicated cron
        created on each runbot instance.
        """
        if hostname != fqdn():
            return 'Not for me'

        host = self.env['runbot.host']._get_current()
        host.set_psql_conn_count()
        host._bootstrap()
        host.last_start_loop = fields.Datetime.now()

        self._commit()
        start_time = time.time()
        # 1. source cleanup
        # -> Remove sources when no build is using them
        # (could be useful to keep them for wakeup, but we can check them out again if not force-pushed)
        self.env['runbot.repo']._source_cleanup()
        # 2. db and log cleanup
        # -> Keep them as long as possible
        self.env['runbot.build']._local_cleanup()
        # 3. docker cleanup
        self.env['runbot.repo']._docker_cleanup()
        host._docker_build()

        timeout = self._get_cron_period()
        icp = self.env['ir.config_parameter']
        update_frequency = int(icp.get_param('runbot.runbot_update_frequency', default=10))
        while time.time() - start_time < timeout:
            time.sleep(self._scheduler_loop_turn(host, update_frequency))

        host.last_end_loop = fields.Datetime.now()

    def _scheduler_loop_turn(self, host, default_sleep=1):
        repos = self.search([('mode', '!=', 'disabled')])
        try:
            repos._scheduler(host)
            host.last_success = fields.Datetime.now()
            self._commit()
        except Exception as e:
            self.env.cr.rollback()
            self.env.clear()
            _logger.exception(e)
            message = str(e)
            if host.last_exception == message:
                host.exception_count += 1
            else:
                host.last_exception = str(e)
                host.exception_count = 1
            self._commit()
            return random.uniform(0, 3)
        else:
            if host.last_exception:
                host.last_exception = ""
                host.exception_count = 0
            return default_sleep

    def _source_cleanup(self):
        try:
            if self.pool._init:
                return
            _logger.info('Source cleaning')
            # a source can only be removed if no build is using it as name or in dependency_ids, i.e. as a commit
            cannot_be_deleted_builds = self.env['runbot.build'].search([('host', '=', fqdn()), ('local_state', 'not in', ('done', 'duplicate'))])
            cannot_be_deleted_path = set()
            for build in cannot_be_deleted_builds:
                for commit in build._get_all_commit():
                    cannot_be_deleted_path.add(commit._source_path())

            to_delete = set()
            to_keep = set()
            repos = self.search([('mode', '!=', 'disabled')])
            for repo in repos:
                repo_source = os.path.join(repo._root(), 'sources', repo._get_repo_name_part(), '*')
                for source_dir in glob.glob(repo_source):
                    if source_dir not in cannot_be_deleted_path:
                        to_delete.add(source_dir)
                    else:
                        to_keep.add(source_dir)

            # we compare cannot_be_deleted_path with to_keep to ensure that the algorithm is working; we want to avoid erasing files by mistake
            # note: it is possible that a parent build is in testing without having checked out sources, but that should be exceptional
            if to_delete:
                if cannot_be_deleted_path != to_keep:
                    _logger.warning('Inconsistency between sources and database: \n%s \n%s' % (cannot_be_deleted_path - to_keep, to_keep - cannot_be_deleted_path))
                to_delete = list(to_delete)
                to_keep = list(to_keep)
                cannot_be_deleted_path = list(cannot_be_deleted_path)
                for source_dir in to_delete:
                    _logger.info('Deleting source: %s' % source_dir)
                    assert 'static' in source_dir
                    shutil.rmtree(source_dir)
                _logger.info('%s/%s source folders were deleted (%s kept)' % (len(to_delete), len(to_delete + to_keep), len(to_keep)))
        except Exception:
            _logger.error('An exception occurred while cleaning sources')
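
    # Note: runbot container names start with the build id (the dest format
    # matched by dest_reg), so _docker_cleanup() below can map each running
    # container back to its build and stop the ones whose build is already
    # done.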

    def _docker_cleanup(self):
        _logger.info('Docker cleaning')
        docker_ps_result = docker_ps()
        containers = {int(dc.split('-', 1)[0]): dc for dc in docker_ps_result if dest_reg.match(dc)}
        if containers:
            candidates = self.env['runbot.build'].search([('id', 'in', list(containers.keys())), ('local_state', '=', 'done')])
            for c in candidates:
                _logger.info('container %s found running with build state done', containers[c.id])
                docker_stop(containers[c.id], c._path())
        ignored = {dc for dc in docker_ps_result if not dest_reg.match(dc)}
        if ignored:
            _logger.debug('docker (%s) not deleted because not dest format', " ".join(list(ignored)))


class RefTime(models.Model):
    _name = "runbot.repo.reftime"
    _description = "Repo reftime"
    _log_access = False

    time = fields.Float('Time', index=True, required=True)
    repo_id = fields.Many2one('runbot.repo', 'Repository', required=True, ondelete='cascade')


class HookTime(models.Model):
    _name = "runbot.repo.hooktime"
    _description = "Repo hooktime"
    _log_access = False

    time = fields.Float('Time')
    repo_id = fields.Many2one('runbot.repo', 'Repository', required=True, ondelete='cascade')