From 45721cdf6c3a97681b94a87b162903a910e8c656 Mon Sep 17 00:00:00 2001
From: Xavier-Do
Date: Wed, 3 Jun 2020 16:17:42 +0200
Subject: [PATCH] [IMP] runbot: runbot 5.0

Runbot's initial architecture was designed for a single odoo repo and was later adapted to build enterprise. The addition of the upgrade repo and its tests began to make results less intuitive and revealed more weaknesses in the system. Added to the oddities of duplicate detection and branch matching, there was some room for improvement in the runbot models.

This (small) commit introduces runbot v5.0, designed for a closer match of odoo's development flows, hopefully improving the dev experience and making runbot configuration more flexible.

**Remotes:** the introduction of remotes helps to detect duplicates between odoo and odoo-dev repos: a commit now belongs to a repo, and a repo has multiple remotes. If a hash exists in odoo-dev, we consider it to be the same commit in odoo. Note: github seems to manage commits in a similar way, since it is possible to send a status on a commit in odoo when the commit only exists in odoo-dev. This change also allows removing some configuration duplicated between a repo and its corresponding dev repo (modules, server files, manifests, ...).

**Trigger:** before v5.0, only one build per repo was created, making it difficult to tweak which tests to execute in which case. The example use case is upgrade: we want to test upgrade to master when pushing on odoo, but we also want to test upgrade the same way when pushing on upgrade, i.e. a build that should run when pushing on either repo, while each repo already has its own specific tests. A trigger specifies a build to create with a specific config. The trigger is executed when any of its repos is pushed. A trigger can also define dependencies: only build enterprise when pushing enterprise, but enterprise needs odoo; test upgrade to master when pushing either odoo or upgrade. Triggers also allow extracting builds like cla, which were executed on both enterprise and odoo and hidden in a subbuild.

**Bundle:** cross-repo branch/PR matching was hidden in build creation and could be confusing. A build could be detected as a duplicate of a PR, but not always, if the naming was wrong or the target was invalid or changed, mainly because of how a community ref is found. This made the CI on a PR non-deterministic when duplicate matching failed. It also created two builds, one pointing to the other when duplicate detection worked, but with a confusing visual result. The association of remotes and bundles fixes this by adding all PRs and related branches from all repos to a bundle. First of all, this helps to visualise what the runbot considers as branch matching, i.e. what should be considered part of the same task, giving a place to warn devs of possible inconsistencies. Associated with repo/remote, branches of the same repo in a bundle are expected to have the same head. Only one build is created since triggers consider repos, not remotes.

**Batch:** a batch is a group of builds; a batch on a bundle can be compared to a build on a branch in the previous version. When a branch is pushed, the corresponding bundle creates a new batch and waits for new commits. Once no new update has been detected in the batch for 60 seconds, all the eligible triggers are executed. The created builds are added to the batch in a batch_slot.
It is also possible that an corresponding build exists (duplicate) and is added to the slot instead of creating a new build. Co-authored-by d-fence --- README.md | 194 +++- runbot/__manifest__.py | 54 +- runbot/common.py | 42 +- runbot/container.py | 74 +- runbot/controllers/badge.py | 71 +- runbot/controllers/frontend.py | 567 +++++----- runbot/controllers/hook.py | 56 +- runbot/data/build_parse.xml | 4 +- runbot/data/runbot_build_config_data.xml | 21 +- runbot/data/runbot_data.xml | 94 ++ runbot/documentation/images/repo_odoo.png | Bin 0 -> 36084 bytes runbot/documentation/images/repo_runbot.png | Bin 0 -> 35062 bytes runbot/documentation/images/trigger.png | Bin 0 -> 16628 bytes runbot/fields.py | 3 +- runbot/migrations/11.0.4.5/post-migration.py | 2 +- runbot/migrations/13.0.5.0/post-migration.py | 499 +++++++++ runbot/migrations/13.0.5.0/pre-migration.py | 51 + runbot/models/__init__.py | 22 +- runbot/models/batch.py | 408 +++++++ runbot/models/branch.py | 503 ++++----- runbot/models/build.py | 831 +++++++------- runbot/models/build_config.py | 574 ++++++++-- runbot/models/build_dependency.py | 16 - runbot/models/build_error.py | 36 +- runbot/models/build_stat.py | 52 +- runbot/models/build_stat_regex.py | 9 +- runbot/models/bundle.py | 222 ++++ runbot/models/commit.py | 226 ++++ runbot/models/database.py | 23 + runbot/models/event.py | 35 +- runbot/models/host.py | 38 +- runbot/models/ir_cron.py | 1 + runbot/models/ir_ui_view.py | 15 + runbot/models/project.py | 20 + runbot/models/repo.py | 984 +++++++--------- runbot/models/res_config_settings.py | 41 +- runbot/models/runbot.py | 350 ++++++ runbot/models/upgrade.py | 63 ++ runbot/models/user.py | 10 + runbot/models/version.py | 102 ++ runbot/security/ir.model.access.csv | 72 +- runbot/security/ir.rule.csv | 13 +- runbot/security/runbot_security.xml | 30 +- runbot/static/src/css/runbot.css | 87 -- runbot/static/src/css/runbot.scss | 202 ++++ runbot/static/src/js/runbot.js | 3 +- runbot/templates/assets.xml | 18 +- runbot/templates/batch.xml | 147 +++ runbot/templates/branch.xml | 120 +- runbot/templates/build.xml | 549 +++++---- runbot/templates/build_error.xml | 66 ++ runbot/templates/bundle.xml | 81 ++ runbot/templates/commit.xml | 126 +++ runbot/templates/dashboard.xml | 261 ++--- runbot/templates/frontend.xml | 283 ++--- runbot/templates/git.xml | 15 + runbot/templates/utils.xml | 310 ++++++ runbot/tests/__init__.py | 5 +- runbot/tests/common.py | 227 +++- runbot/tests/test_branch.py | 267 +++-- runbot/tests/test_build.py | 1047 +++++------------- runbot/tests/test_build_config_step.py | 127 +-- runbot/tests/test_build_error.py | 23 +- runbot/tests/test_build_stat.py | 28 +- runbot/tests/test_commit.py | 88 ++ runbot/tests/test_cron.py | 85 +- runbot/tests/test_event.py | 28 +- runbot/tests/test_frontend.py | 71 -- runbot/tests/test_repo.py | 472 +++++--- runbot/tests/test_runbot.py | 14 + runbot/tests/test_schedule.py | 33 +- runbot/tests/test_upgrade.py | 534 +++++++++ runbot/tests/test_version.py | 61 + runbot/views/assets.xml | 10 - runbot/views/branch_views.xml | 46 +- runbot/views/build_error_views.xml | 8 +- runbot/views/build_views.xml | 72 +- runbot/views/bundle_views.xml | 155 +++ runbot/views/commit_views.xml | 31 + runbot/views/config_views.xml | 40 +- runbot/views/error_log_views.xml | 16 +- runbot/views/host_views.xml | 2 +- runbot/views/repo_views.xml | 202 +++- runbot/views/res_config_settings_views.xml | 84 +- runbot/views/stat_views.xml | 11 +- runbot/views/upgrade.xml | 64 ++ runbot/views/warning_views.xml | 29 
+ runbot/wizards/multi_build_wizard.py | 2 - runbot_builder/builder.py | 14 +- runbot_builder/dbmover.py | 171 +++ runbot_cla/build_config.py | 61 +-
 91 files changed, 8512 insertions(+), 4312 deletions(-)
 create mode 100644 runbot/data/runbot_data.xml
 create mode 100644 runbot/documentation/images/repo_odoo.png
 create mode 100644 runbot/documentation/images/repo_runbot.png
 create mode 100644 runbot/documentation/images/trigger.png
 create mode 100644 runbot/migrations/13.0.5.0/post-migration.py
 create mode 100644 runbot/migrations/13.0.5.0/pre-migration.py
 create mode 100644 runbot/models/batch.py
 delete mode 100644 runbot/models/build_dependency.py
 create mode 100644 runbot/models/bundle.py
 create mode 100644 runbot/models/commit.py
 create mode 100644 runbot/models/database.py
 create mode 100644 runbot/models/ir_ui_view.py
 create mode 100644 runbot/models/project.py
 create mode 100644 runbot/models/runbot.py
 create mode 100644 runbot/models/upgrade.py
 create mode 100644 runbot/models/user.py
 create mode 100644 runbot/models/version.py
 delete mode 100644 runbot/static/src/css/runbot.css
 create mode 100644 runbot/static/src/css/runbot.scss
 create mode 100644 runbot/templates/batch.xml
 create mode 100644 runbot/templates/build_error.xml
 create mode 100644 runbot/templates/bundle.xml
 create mode 100644 runbot/templates/commit.xml
 create mode 100644 runbot/templates/git.xml
 create mode 100644 runbot/templates/utils.xml
 create mode 100644 runbot/tests/test_commit.py
 delete mode 100644 runbot/tests/test_frontend.py
 create mode 100644 runbot/tests/test_runbot.py
 create mode 100644 runbot/tests/test_upgrade.py
 create mode 100644 runbot/tests/test_version.py
 delete mode 100644 runbot/views/assets.xml
 create mode 100644 runbot/views/bundle_views.xml
 create mode 100644 runbot/views/commit_views.xml
 create mode 100644 runbot/views/upgrade.xml
 create mode 100644 runbot/views/warning_views.xml
 create mode 100755 runbot_builder/dbmover.py
diff --git a/README.md b/README.md
index 94b13344..c46a3f9e 100644
--- a/README.md
+++ b/README.md
@@ -1,15 +1,191 @@
-Odoo Runbot Repository
-=======================
+# Odoo Runbot Repository
+
+[![Build Status](http://runbot.odoo.com/runbot/badge/flat/13/13.0.svg)](http://runbot.odoo.com/runbot)
 This repository contains the source code of Odoo testing bot [runbot.odoo.com](http://runbot.odoo.com/runbot) and related addons.
-Runbot
------
-
-The `runbot/` directory holds the main runbot Odoo addon.
-
-Runbot CLA addon
-----------------
-The `runbot_cla/` directory contains an Odoo addon that checks CLA.
+## Warnings
+
+**Runbot will delete folders and drop databases to free some space during usage.** Even if only elements created by runbot are concerned, don't use runbot on a server with sensitive data.
+
+**Runbot changes some default odoo behaviours.** The runbot database may work with other modules, but without any guarantee. Avoid using runbot on an existing database or installing modules other than runbot.
+
+## Glossary/models
+
+Runbot v5 uses a set of concepts in order to cover all the use cases we need:
+
+- **Project**: regroups a set of repositories that work together. Usually one project is enough and a default *R&D* project exists.
+- **Repository**: A repository name regrouping a repo and its forks. Ex: odoo, enterprise
+- **Remote**: A remote for a repository. Example: odoo/odoo, odoo-dev/odoo
+- **Build**: A test instance, using a set of commits and parameters to run some code and produce a result.
+- **Trigger**: Indicates that a build should be created when a new commit is pushed on a repo. A trigger has both trigger repos and dependency repos. Ex: a new commit on runbot -> a build with runbot and a dependency on odoo.
+- **Bundle**: A set of branches that work together: all the branches with the same name and all linked PRs in the same project.
+- **Batch**: A container for the builds and commits of a bundle. When a new commit is pushed on a branch, if a trigger exists for the repo of that branch, a new batch is created with this commit. After 60 seconds, if no other commit is added to the batch, a build is created by each trigger having a new commit in this batch.
+
+## HOW TO
+
+This section gives the basic steps to follow to configure runbot v5.0. The configuration may differ from one use case to another; this one describes how to test addons for odoo, fetching the odoo core but without testing vanilla odoo. As an example, runbot itself will be used as the tested addon.
+
+### Setup
+
+Runbot is an addon for odoo, meaning that both the odoo and runbot code are needed to run it. Some tips to configure odoo are available in the [odoo setup documentation](https://www.odoo.com/documentation/13.0/setup/install.html#setup-install-source) (requirements, postgres, ...). This page will mainly focus on runbot specificities.
+
+Choose a workspace and clone both repositories.
+
+```
+git clone https://github.com/odoo/odoo.git
+git clone https://github.com/odoo/runbot.git
+```
+
+Runbot depends on a specific odoo version; runbot v5.0 is currently based on odoo 13.0 (Runbot 13.0.5.0). The 13.0 branch should be checked out in both runbot and odoo. *This follows the convention imposed by runbot to run code from different repositories: branch names must be the same or be prefixed by a main branch name.*
+
+```
+git -C odoo checkout 13.0
+git -C runbot checkout 13.0
+```
+
+### Specific requirements
+
+You will also need to install docker on your system. The user that will be used to operate the runbot must also have access to the Docker commands. On Debian-like systems, it's only a matter of adding the user to the `docker` group.
+
+```
+sudo adduser $USER docker
+```
+
+The only specific python requirement is the `matplotlib` library.
+
+```
+sudo apt install python3-matplotlib
+```
+
+### Install and start runbot
+
+Runbot being an odoo addon, you need to start odoo with runbot in the addons path. Install runbot with the -i option.
+
+```
+python3 odoo/odoo-bin -d runbot_database --addons-path odoo/addons,runbot -i runbot --stop-after-init --without-demo=1
+```
+
+Then, launch runbot:
+
+```
+python3 odoo/odoo-bin -d runbot_database --addons-path odoo/addons,runbot --limit-memory-soft 4294967296 --limit-memory-hard 4311744512 --limit-time-real-cron=1800
+```
+
+Note:
+- --limit-time-real-cron is important to ensure that crons have enough time to build docker images and clone repos the first time. It may be reduced to a lower value later (600 is advised).
+- --limit-memory-* is not mandatory, but fetching odoo on multiple remotes with only 2GiB may result in a failure of the fetch command. If git fails to create async threads or runs out of memory, increasing the memory limit may be a good idea. *cf. odoo-bin --help for more info.*
+
+You may want to configure a service or launch odoo in a screen depending on your preferences.
+
+### Configuration
+
+*Note: Runbot is optimized to run commit discovery and build scheduling on different hosts to allow load sharing across machines.
This basic configuration will show how to run runbot on a single machine, a less-tested use case*
+
+#### Bootstrap
+Once launched, the cron should start to do basic work. Commit discovery and build scheduling are disabled by default, but the runbot bootstrap will start to set up some directories in static.
+>Starting job `Runbot`.
+```
+ls runbot/runbot/static
+```
+>build docker nginx repo sources src
+
+- **repo** contains the bare repositories
+- **sources** contains the exported sources needed for each build
+- **build** contains the different workspaces for dockers, containing logs, filestore, ...
+- **docker** contains the Dockerfile and docker build logs
+- **nginx** contains the nginx config used to access running instances
+All of them are empty for now.
+
+A database defined by the *runbot.runbot_db_template* icp will be created. By default, runbot uses template1. This database will be used as a template for testing builds. You can change this database for more customisation.
+
+Other cron operations are still disabled for now.
+
+#### Access backend
+Access the odoo "backend" at *127.0.0.1:8069/web*
+
+If not connected yet, connect as admin (default password: admin). Check the odoo documentation for other needed configuration, such as the master password. This is mainly needed for production purposes; a local instance will work as it is.
+If you create another Odoo user to manage the runbot, you may add the group *Runbot administrator* to this user.
+
+#### Add remotes and repositories
+Access the runbot app and go to the Repos->Repositories menu.
+
+Create a new repo for odoo
+![Odoo repo configuration](runbot/documentation/images/repo_odoo.png "Odoo repo configuration")
+
+- A single remote is added, the base odoo repo. Only branches will be fetched to limit disk usage, and branches will be created in the backend. It is possible to add multiple remotes for forks.
+
+- The repo is in poll mode since github won't hook your runbot instance. Poll mode is limited to one update every 5 minutes.
+
+- The modules to install pattern is -* in order to disable the default modules to test for this repo. This will speed up installs. To install and test all modules, leave this field empty or use \*. Some modules may be blacklisted individually, by using *-module,-other_module, l10n_\*.
+
+- Server files let runbot know which files can be used to launch odoo. odoo-bin is the one to use for the latest version, but you may want to add other server files for older versions (comma-separated list). The same logic is used for manifest files.
+
+- Addons paths are the places where addons directories are located. They will be used for the addons-path parameter but also for module discovery.
+
+Create a repo for your custom addons repo
+![Odoo repo configuration](runbot/documentation/images/repo_runbot.png "Odoo repo configuration")
+- For your custom repo, it is advised to configure the repo in hook mode if possible.
+- No server files should be given since it is an addons repo.
+- No addons_path is given, so the repo root is used as default.
+- We only want to test runbot and runbot_cla on runbot: `-*,runbot,runbot_cla` will blacklist everything except these ones.
+- The remote has the PR option checked to fetch pull requests too. This is optional.
+
+#### Tweak runbot parameters and enable features
+
+Access the runbot settings and tweak the default parameters.
+- The *number of workers* is the default number of parallel testing builds per machine. It is advised to keep one physical core per worker on a dedicated machine.
On a local machine, keep it low; **2** is a good start (using 8 on runbot.odoo.com).
+
+- The *number of running builds* is the number of parallel running builds. Runbot will start to kill running builds once this limit is reached. This number can be pumped up on a server (using 60 on runbot.odoo.com).
+- *Runbot domain* will mainly be used for nginx to access running builds.
+- Max commit age is the limit after which a branch head will be ignored in processing. This reduces the processing of old, non-deleted branches. Keep in mind that pushing an old commit on a branch will also be ignored by runbot.
+
+- **Discover new commits** is disabled by default but is needed to fetch repositories and create new commits/batches/builds. **Check** this option.
+
+- **Schedule builds** is needed to process pending/testing. **Check** this option. To use a dedicated host to schedule builds, leave this option unchecked and use the dedicated tool in runbot/builder.
+
+Save the parameters. The next cron execution should do a lot of setup.
+NOTE: The default limit-time-real-cron should ideally be set to at least 1800 for this operation.
+- If schedule builds is checked, the first time-consuming operation will be to build the docker image. You can check the currently running dockers with `docker ps -a`. One of them should be up for a few minutes. If the build is not finished at the end of the cron timeout, the docker build will usually resume its progress and continue with the next step, but it could also fail on the same step each time and stay stuck. Ensure limit-time-real-cron is high enough; depending on your bandwidth and power this value could be 600-1800 (or more). Let's wait and make a coffee. You can also check progress by tailing runbot/static/docker/docker_build.txt
+
+- The next git update will init the repositories; a config file with your remotes should be created for each repo. You can check the content in /runbot/static/repo/(runbot|odoo)/config. The repos will be fetched; this operation may take some time too.
+
+Those two operations will be faster on the next executions.
+
+Finally, the first new branches/batches should be created. You can list them in Bundle > Bundles.
+
+#### Bundles configuration
+
+We need to define which bundles are base versions (master should already be marked as a base). In runbot's case we only need 13.0, but all saas- and numerical branches should be marked as base in a general way. A base will be used to fill missing commits in a batch if a bundle doesn't have a branch in each repo, and will trigger the creation of a version. Versions may be used for upgrade tests.
+
+Bundles can also be marked as `no_build`, so that new commits won't trigger batch creation and the bundle won't be displayed on the main page.
+
+#### Triggers
+At this point, runbot will discover new branches and new commits and create bundles, but no build will be created.
+
+When a new commit is discovered, the branch is updated with the new commit. Then this commit is added to a batch, a container for new builds when they arrive, but only if a trigger corresponding to this repo exists. After one minute without a new commit update in the batch, the different triggers will create one build each.
+In this example, we want to create a new build when a new commit is pushed on runbot, and this build needs a commit in odoo as a dependency.
+
+![Odoo trigger configuration](runbot/documentation/images/trigger.png "Odoo trigger configuration")
+
+Note that the config used is the default one. It is advised to customize this config. In our example, adding */runbot,/runbot_cla* test-tags on config step *all* may be a good idea to speed up testing by skipping tests from dependencies.
+
+When a branch is pushed, a new batch will be created, and after one minute the new build will be created if no other change is detected. The build remains in pending state for now. Check the result on 127.0.0.1:8069/runbot
+
+#### Hosts
+Runbot is able to share pending builds across multiple hosts. In the present case, there is only one. A new host will never assign pending builds to itself by default.
+Go to the Build Hosts menu and choose yours. Uncheck *Only accept assigned build*. You can also tweak the number of parallel builds for this host.
+
+### Modules filters
+Modules to install can be filtered by repo, and by config step. The first filter to be applied is the repo one, creating the default list for a config step.
+Adding -module on a repo will remove the module from the default; it is advised to reflect the default case on the repo. To test only a custom module, adding *-\** on the odoo repo will disable all odoo addons. Only dependencies of custom modules will be installed. Some specific modules can also be filtered out using *-module1,-module2*, or some specific modules can be kept using *-\*,module1,module2*.
+Modules can also be filtered on a config step with the same logic as the repo filter, except that all modules can be unblacklisted from the repo by starting the list with *\** (all available modules).
+It is also possible to add test-tags to a config step to allow more modules to be installed but only test some specific ones. Test tags: */module1,/module2*
+
+### db template
+Db creation will use template0 by default. It is possible to specify a specific template to use in the runbot config *Postgresql template*. It is mainly used to add extensions by default.
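+For example, assuming the runbot system user is allowed to create databases, a dedicated template with an extension pre-installed could be prepared as follows (the database name `runbot_template` and the `unaccent` extension are only illustrative) and then set as the *Postgresql template* (the *runbot.runbot_db_template* icp) in the runbot settings:
+
+```
+# create an illustrative template database and pre-install an extension in it
+createdb runbot_template
+psql -d runbot_template -c "CREATE EXTENSION IF NOT EXISTS unaccent;"
+```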
diff --git a/runbot/__manifest__.py b/runbot/__manifest__.py index 693cb9a0..c6c0962d 100644 --- a/runbot/__manifest__.py +++ b/runbot/__manifest__.py @@ -2,39 +2,53 @@ { 'name': "runbot", 'summary': "Runbot", - 'description': "Runbot for Odoo 11.0", + 'description': "Runbot for Odoo 13.0", 'author': "Odoo SA", 'website': "http://runbot.odoo.com", 'category': 'Website', - 'version': '4.10', - 'depends': ['website', 'base'], + 'version': '5.0', + 'depends': ['base', 'base_automation', 'website'], 'data': [ + 'data/build_parse.xml', + 'data/error_link.xml', + 'data/runbot_build_config_data.xml', + 'data/runbot_data.xml', + 'data/runbot_error_regex_data.xml', + 'data/website_data.xml', + 'security/runbot_security.xml', 'security/ir.model.access.csv', 'security/ir.rule.csv', - 'views/assets.xml', - 'views/repo_views.xml', + + 'templates/assets.xml', + 'templates/badge.xml', + 'templates/batch.xml', + 'templates/branch.xml', + 'templates/build.xml', + 'templates/bundle.xml', + 'templates/commit.xml', + 'templates/dashboard.xml', + 'templates/frontend.xml', + 'templates/git.xml', + 'templates/nginx.xml', + 'templates/utils.xml', + 'templates/build_error.xml', + 'views/branch_views.xml', - 'views/build_views.xml', - 'views/host_views.xml', 'views/build_error_views.xml', - 'views/error_log_views.xml', + 'views/build_views.xml', + 'views/bundle_views.xml', + 'views/commit_views.xml', 'views/config_views.xml', + 'views/error_log_views.xml', + 'views/host_views.xml', + 'views/repo_views.xml', 'views/res_config_settings_views.xml', 'views/stat_views.xml', + 'views/upgrade.xml', + 'views/warning_views.xml', + 'wizards/mutli_build_wizard_views.xml', 'wizards/stat_regex_wizard_views.xml', - 'templates/frontend.xml', - 'templates/build.xml', - 'templates/assets.xml', - 'templates/dashboard.xml', - 'templates/nginx.xml', - 'templates/badge.xml', - 'templates/branch.xml', - 'data/runbot_build_config_data.xml', - 'data/build_parse.xml', - 'data/runbot_error_regex_data.xml', - 'data/error_link.xml', - 'data/website_data.xml', ], } diff --git a/runbot/common.py b/runbot/common.py index f9acc67f..c78ab70b 100644 --- a/runbot/common.py +++ b/runbot/common.py @@ -3,11 +3,11 @@ import contextlib import itertools import logging -import os import psycopg2 import re import socket import time +import os from collections import OrderedDict from datetime import timedelta @@ -19,29 +19,11 @@ from odoo.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT _logger = logging.getLogger(__name__) -dest_reg = re.compile(r'^\d{5,}-.{1,32}-[\da-f]{6}(.*)*$') +dest_reg = re.compile(r'^\d{5,}-.+$') -class Commit(): - def __init__(self, repo, sha): - self.repo = repo - self.sha = sha - def _source_path(self, *path): - return self.repo._source_path(self.sha, *path) - - def export(self): - return self.repo._git_export(self.sha) - - def read_source(self, file, mode='r'): - file_path = self._source_path(file) - try: - with open(file_path, mode) as f: - return f.read() - except: - return False - - def __str__(self): - return '%s:%s' % (self.repo.short_name, self.sha) +class RunbotException(Exception): + pass def fqdn(): @@ -89,15 +71,29 @@ def rfind(filename, pattern): return False +def time_delta(time): + if isinstance(time, timedelta): + return time + return timedelta(seconds=-time) + + def s2human(time): """Convert a time in second into an human readable string""" return format_timedelta( - timedelta(seconds=time), + time_delta(time), format="narrow", threshold=2.1, ) +def s2human_long(time): + return format_timedelta( + time_delta(time), 
+ threshold=2.1, + add_direction=True, locale='en' + ) + + @contextlib.contextmanager def local_pgadmin_cursor(): cnx = None diff --git a/runbot/container.py b/runbot/container.py index c765f760..54fca9fb 100644 --- a/runbot/container.py +++ b/runbot/container.py @@ -35,13 +35,15 @@ ENV COVERAGE_FILE /data/build/.coverage class Command(): - def __init__(self, pres, cmd, posts, finals=None, config_tuples=None): + + def __init__(self, pres, cmd, posts, finals=None, config_tuples=None, cmd_checker=None): """ Command object that represent commands to run in Docker container :param pres: list of pre-commands :param cmd: list of main command only run if the pres commands succeed (&&) :param posts: list of post commands posts only run if the cmd command succedd (&&) :param finals: list of finals commands always executed :param config_tuples: list of key,value tuples to write in config file + :param cmd_checker: a checker object that must have a `_cmd_check` method that will be called at build returns a string of the full command line to run """ self.pres = pres or [] @@ -49,6 +51,7 @@ class Command(): self.posts = posts or [] self.finals = finals or [] self.config_tuples = config_tuples or [] + self.cmd_checker = cmd_checker def __getattr__(self, name): return getattr(self.cmd, name) @@ -57,7 +60,7 @@ class Command(): return self.cmd[key] def __add__(self, l): - return Command(self.pres, self.cmd + l, self.posts, self.finals, self.config_tuples) + return Command(self.pres, self.cmd + l, self.posts, self.finals, self.config_tuples, self.cmd_checker) def __str__(self): return ' '.join(self) @@ -66,6 +69,8 @@ class Command(): return self.build().replace('&& ', '&&\n').replace('|| ', '||\n\t').replace(';', ';\n') def build(self): + if self.cmd_checker: + self.cmd_checker._cmd_check(self) cmd_chain = [] cmd_chain += [' '.join(pre) for pre in self.pres if pre] cmd_chain.append(' '.join(self)) @@ -95,6 +100,10 @@ class Command(): def docker_build(log_path, build_dir): + return _docker_build(log_path, build_dir) + + +def _docker_build(log_path, build_dir): """Build the docker image :param log_path: path to the logfile that will contain odoo stdout and stderr :param build_dir: the build directory that contains the Odoo sources to build. 
@@ -111,7 +120,11 @@ def docker_build(log_path, build_dir): dbuild.wait() -def docker_run(run_cmd, log_path, build_dir, container_name, exposed_ports=None, cpu_limit=None, preexec_fn=None, ro_volumes=None, env_variables=None): +def docker_run(*args, **kwargs): + return _docker_run(*args, **kwargs) + + +def _docker_run(run_cmd, log_path, build_dir, container_name, exposed_ports=None, cpu_limit=None, preexec_fn=None, ro_volumes=None, env_variables=None): """Run tests in a docker container :param run_cmd: command string to run in container :param log_path: path to the logfile that will contain odoo stdout and stderr @@ -166,11 +179,16 @@ def docker_run(run_cmd, log_path, build_dir, container_name, exposed_ports=None, if cpu_limit: docker_command.extend(['--ulimit', 'cpu=%s' % int(cpu_limit)]) docker_command.extend(['odoo:runbot_tests', '/bin/bash', '-c', "%s" % run_cmd]) - docker_run = subprocess.Popen(docker_command, stdout=logs, stderr=logs, preexec_fn=preexec_fn, close_fds=False, cwd=build_dir) + subprocess.Popen(docker_command, stdout=logs, stderr=logs, preexec_fn=preexec_fn, close_fds=False, cwd=build_dir) _logger.info('Started Docker container %s', container_name) return + def docker_stop(container_name, build_dir=None): + return _docker_stop(container_name, build_dir) + + +def _docker_stop(container_name, build_dir): """Stops the container named container_name""" container_name = sanitize_container_name(container_name) _logger.info('Stopping container %s', container_name) @@ -181,11 +199,13 @@ def docker_stop(container_name, build_dir=None): _logger.info('Stopping docker without defined build_dir') subprocess.run(['docker', 'stop', container_name]) + def docker_is_running(container_name): container_name = sanitize_container_name(container_name) dinspect = subprocess.run(['docker', 'container', 'inspect', container_name], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) return True if dinspect.returncode == 0 else False + def docker_state(container_name, build_dir): container_name = sanitize_container_name(container_name) started = os.path.exists(os.path.join(build_dir, 'start-%s' % container_name)) @@ -201,6 +221,7 @@ def docker_state(container_name, build_dir): return 'UNKNOWN' + def docker_clear_state(container_name, build_dir): """Return True if container is still running""" container_name = sanitize_container_name(container_name) @@ -209,6 +230,7 @@ def docker_clear_state(container_name, build_dir): if os.path.exists(os.path.join(build_dir, 'end-%s' % container_name)): os.remove(os.path.join(build_dir, 'end-%s' % container_name)) + def docker_get_gateway_ip(): """Return the host ip of the docker default bridge gateway""" docker_net_inspect = subprocess.run(['docker', 'network', 'inspect', 'bridge'], stdout=subprocess.PIPE) @@ -220,7 +242,12 @@ def docker_get_gateway_ip(): except KeyError: return None + def docker_ps(): + return _docker_ps() + + +def _docker_ps(): """Return a list of running containers names""" try: docker_ps = subprocess.run(['docker', 'ps', '--format', '{{.Names}}'], stderr=subprocess.DEVNULL, stdout=subprocess.PIPE) @@ -229,7 +256,11 @@ def docker_ps(): return [] if docker_ps.returncode != 0: return [] - return docker_ps.stdout.decode().strip().split('\n') + output = docker_ps.stdout.decode() + if not output: + return [] + return output.strip().split('\n') + def build(args): """Build container from CLI""" @@ -272,7 +303,7 @@ def tests(args): container_name = 'odoo-container-test-%s' % datetime.datetime.now().microsecond docker_run(cmd.build(), env_log, 
args.build_dir, container_name, env_variables=env_variables) expected = 'testa is test a and testb is "test b"' - time.sleep(3) # ugly sleep to wait for docker process to flush the log file + time.sleep(3) # ugly sleep to wait for docker process to flush the log file assert expected in open(env_log,'r').read() # Test testing @@ -281,13 +312,13 @@ def tests(args): python_params = [] if args.coverage: omit = ['--omit', '*__manifest__.py'] - python_params = [ '-m', 'coverage', 'run', '--branch', '--source', '/data/build'] + omit + python_params = ['-m', 'coverage', 'run', '--branch', '--source', '/data/build'] + omit posts = [['python%s' % py_version, "-m", "coverage", "html", "-d", "/data/build/coverage", "--ignore-errors"], ['python%s' % py_version, "-m", "coverage", "xml", "--ignore-errors"]] os.makedirs(os.path.join(args.build_dir, 'coverage'), exist_ok=True) elif args.flamegraph: flame_log = '/data/build/logs/flame.log' python_params = ['-m', 'flamegraph', '-o', flame_log] - odoo_cmd = ['python%s' % py_version ] + python_params + ['/data/build/odoo-bin', '-d %s' % args.db_name, '--addons-path=/data/build/addons', '-i', args.odoo_modules, '--test-enable', '--stop-after-init', '--max-cron-threads=0'] + odoo_cmd = ['python%s' % py_version] + python_params + ['/data/build/odoo-bin', '-d %s' % args.db_name, '--addons-path=/data/build/addons', '-i', args.odoo_modules, '--test-enable', '--stop-after-init', '--max-cron-threads=0'] cmd = Command(pres, odoo_cmd, posts) cmd.add_config_tuple('data_dir', '/data/build/datadir') cmd.add_config_tuple('db_user', '%s' % os.getlogin()) @@ -345,6 +376,29 @@ def tests(args): docker_run(cmd.build(), logfile, args.build_dir, container_name, exposed_ports=[args.odoo_port, args.odoo_port + 1], cpu_limit=300) +############################################################################## +# Ugly monkey patch to set runbot in set runbot in testing mode +# No Docker will be started, instead a fake docker_run function will be used +############################################################################## + +if os.environ.get('RUNBOT_MODE') == 'test': + _logger.warning('Using Fake Docker') + + def fake_docker_run(run_cmd, log_path, build_dir, container_name, exposed_ports=None, cpu_limit=None, preexec_fn=None, ro_volumes=None, env_variables=None, *args, **kwargs): + _logger.info('Docker Fake Run: %s', run_cmd) + open(os.path.join(build_dir, 'start-%s' % container_name), 'w').write('fake start\n') + open(os.path.join(build_dir, 'end-%s' % container_name), 'w').write('fake end') + with open(log_path, 'w') as log_file: + log_file.write('Fake docker_run started\n') + log_file.write('run_cmd: %s\n' % run_cmd) + log_file.write('build_dir: %s\n' % container_name) + log_file.write('container_name: %s\n' % container_name) + log_file.write('.modules.loading: Modules loaded.\n') + log_file.write('Initiating shutdown\n') + + docker_run = fake_docker_run + + if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s: %(message)s') parser = argparse.ArgumentParser() @@ -358,8 +412,8 @@ if __name__ == '__main__': p_test.add_argument('odoo_port', type=int) p_test.add_argument('db_name') group = p_test.add_mutually_exclusive_group() - group.add_argument('--coverage', action='store_true', help= 'test a build with coverage') - group.add_argument('--flamegraph', action='store_true', help= 'test a build and draw a flamegraph') + group.add_argument('--coverage', action='store_true', help='test a build with coverage') + 
group.add_argument('--flamegraph', action='store_true', help='test a build and draw a flamegraph') p_test.add_argument('-i', dest='odoo_modules', default='web', help='Comma separated list of modules') p_test.add_argument('--kill', action='store_true', default=False, help='Also test container kill') p_test.add_argument('--dump', action='store_true', default=False, help='Test database export with pg_dump') diff --git a/runbot/controllers/badge.py b/runbot/controllers/badge.py index e6890e78..c3b63aa9 100644 --- a/runbot/controllers/badge.py +++ b/runbot/controllers/badge.py @@ -11,51 +11,47 @@ from odoo.http import request, route, Controller class RunbotBadge(Controller): @route([ - '/runbot/badge//.svg', - '/runbot/badge///.svg', + '/runbot/badge//.svg', + '/runbot/badge/trigger//.svg', + '/runbot/badge///.svg', + '/runbot/badge/trigger///.svg', ], type="http", auth="public", methods=['GET', 'HEAD']) - def badge(self, repo_id, branch, theme='default'): - - domain = [('repo_id', '=', repo_id), - ('branch_id.branch_name', '=', branch), - ('branch_id.sticky', '=', True), - ('hidden', '=', False), - ('parent_id', '=', False), - ('global_state', 'in', ['testing', 'running', 'done']), - ('global_result', 'not in', ['skipped', 'manually_killed']), - ] - - last_update = '__last_update' - builds = request.env['runbot.build'].sudo().search_read( - domain, ['global_state', 'global_result', 'build_age', last_update], - order='id desc', limit=1) - - if not builds: - return request.not_found() - - build = builds[0] - etag = request.httprequest.headers.get('If-None-Match') - retag = hashlib.md5(str(build[last_update]).encode()).hexdigest() - - if etag == retag: - return werkzeug.wrappers.Response(status=304) - - if build['global_state'] in ('testing', 'waiting'): - state = build['global_state'] - cache_factor = 1 + def badge(self, name, repo_id=False, trigger_id=False, theme='default'): + if trigger_id: + triggers = request.env['runbot.trigger'].browse(trigger_id) else: - cache_factor = 2 - if build['global_result'] == 'ok': + triggers = request.env['runbot.trigger'].search([('repo_ids', 'in', repo_id)]) + # -> hack to use repo. 
Would be better to change logic and use a trigger_id in params + bundle = request.env['runbot.bundle'].search([('name', '=', name), + ('project_id', '=', request.env.ref('runbot.main_project').id)]) # WARNING no filter on project + if not bundle or not triggers: + return request.not_found() + batch = request.env['runbot.batch'].search([ + ('bundle_id', '=', bundle.id), + ('state', '=', 'done'), + ('category_id', '=', request.env.ref('runbot.default_category').id) + ], order='id desc', limit=1) + + builds = batch.slot_ids.filtered(lambda s: s.trigger_id in triggers).mapped('build_id') + if not builds: + state = 'testing' + else: + result = builds.result_multi() + if result == 'ok': state = 'success' - elif build['global_result'] == 'warn': + elif result == 'warn': state = 'warning' else: state = 'failed' + etag = request.httprequest.headers.get('If-None-Match') + retag = hashlib.md5(state.encode()).hexdigest() + if etag == retag: + return werkzeug.wrappers.Response(status=304) + # from https://github.com/badges/shields/blob/master/colorscheme.json color = { 'testing': "#dfb317", - 'waiting': "#dfb317", 'success': "#4c1", 'failed': "#e05d44", 'warning': "#fe7d37", @@ -75,13 +71,12 @@ class RunbotBadge(Controller): self.width = text_width(text) + 10 data = { - 'left': Text(branch, '#555'), + 'left': Text(name, '#555'), 'right': Text(state, color), } - five_minutes = 5 * 60 headers = [ ('Content-Type', 'image/svg+xml'), - ('Cache-Control', 'max-age=%d' % (five_minutes * cache_factor,)), + ('Cache-Control', 'max-age=%d' % (10*60,)), ('ETag', retag), ] return request.render("runbot.badge_" + theme, data, headers=headers) diff --git a/runbot/controllers/frontend.py b/runbot/controllers/frontend.py index c27d5a2d..8d01e356 100644 --- a/runbot/controllers/frontend.py +++ b/runbot/controllers/frontend.py @@ -1,19 +1,67 @@ # -*- coding: utf-8 -*- -import operator +import datetime import werkzeug -from collections import OrderedDict +import logging +import functools import werkzeug.utils import werkzeug.urls +from werkzeug.exceptions import NotFound, Forbidden + from odoo.addons.http_routing.models.ir_http import slug from odoo.addons.website.controllers.main import QueryURL -from odoo.http import Controller, request, route -from ..common import uniq_list, flatten, fqdn +from odoo.http import Controller, Response, request, route as o_route from odoo.osv import expression -from odoo.exceptions import UserError +_logger = logging.getLogger(__name__) + + +def route(routes, **kw): + def decorator(f): + @o_route(routes, **kw) + @functools.wraps(f) + def response_wrap(*args, **kwargs): + projects = request.env['runbot.project'].search([]) + more = request.httprequest.cookies.get('more', False) == '1' + filter_mode = request.httprequest.cookies.get('filter_mode', 'all') + keep_search = request.httprequest.cookies.get('keep_search', False) == '1' + cookie_search = request.httprequest.cookies.get('search', '') + refresh = kwargs.get('refresh', False) + nb_build_errors = request.env['runbot.build.error'].search_count([('random', '=', True), ('parent_id', '=', False)]) + nb_assigned_errors = request.env['runbot.build.error'].search_count([('responsible', '=', request.env.user.id)]) + kwargs['more'] = more + kwargs['projects'] = projects + + response = f(*args, **kwargs) + if isinstance(response, Response): + if keep_search and cookie_search and 'search' not in kwargs: + search = cookie_search + else: + search = kwargs.get('search', '') + if keep_search and cookie_search != search: + response.set_cookie('search', 
search) + + project = response.qcontext.get('project') or projects[0] + + response.qcontext['projects'] = projects + response.qcontext['more'] = more + response.qcontext['keep_search'] = keep_search + response.qcontext['search'] = search + response.qcontext['current_path'] = request.httprequest.full_path + response.qcontext['refresh'] = refresh + response.qcontext['filter_mode'] = filter_mode + response.qcontext['qu'] = QueryURL('/runbot/%s' % (slug(project)), path_args=['search'], search=search, refresh=refresh) + if 'title' not in response.qcontext: + response.qcontext['title'] = 'Runbot %s' % project.name or '' + response.qcontext['nb_build_errors'] = nb_build_errors + response.qcontext['nb_assigned_errors'] = nb_assigned_errors + + return response + return response_wrap + return decorator + class Runbot(Controller): @@ -26,354 +74,293 @@ class Runbot(Controller): level = ['info', 'warning', 'danger'][int(pending_count > warn) + int(pending_count > crit)] return pending_count, level, scheduled_count - @route(['/runbot', '/runbot/repo/'], website=True, auth='public', type='http') - def repo(self, repo=None, search='', refresh='', **kwargs): + @o_route([ + '/runbot/submit' + ], type='http', auth="public", methods=['GET', 'POST'], csrf=False) + def submit(self, more=False, redirect='/', keep_search=False, category=False, filter_mode=False, update_triggers=False, **kwargs): + response = werkzeug.utils.redirect(redirect) + response.set_cookie('more', '1' if more else '0') + response.set_cookie('keep_search', '1' if keep_search else '0') + response.set_cookie('filter_mode', filter_mode or 'all') + response.set_cookie('category', category or '0') + if update_triggers: + enabled_triggers = [] + project_id = int(update_triggers) + for key in kwargs.keys(): + if key.startswith('trigger_'): + enabled_triggers.append(key.replace('trigger_', '')) + + key = 'trigger_display_%s' % project_id + if len(request.env['runbot.trigger'].search([('project_id', '=', project_id)])) == len(enabled_triggers): + response.delete_cookie(key) + else: + response.set_cookie(key, '-'.join(enabled_triggers)) + return response + + @route(['/', + '/runbot', + '/runbot/', + '/runbot//search/'], website=True, auth='public', type='http') + def bundles(self, project=None, search='', projects=False, refresh=False, **kwargs): search = search if len(search) < 60 else search[:60] - branch_obj = request.env['runbot.branch'] - build_obj = request.env['runbot.build'] - repo_obj = request.env['runbot.repo'] + env = request.env + categories = env['runbot.category'].search([]) + if not project and projects: + project = projects[0] - repo_ids = repo_obj.search([]) - repos = repo_obj.browse(repo_ids) - if not repo and repos: - repo = repos[0].id - - pending = self._pending() + pending_count, level, scheduled_count = self._pending() context = { - 'repos': repos.ids, - 'repo': repo, - 'host_stats': [], - 'pending_total': pending[0], - 'pending_level': pending[1], - 'scheduled_count': pending[2], - 'hosts_data': request.env['runbot.host'].search([]), + 'categories': categories, 'search': search, - 'refresh': refresh, + 'message': request.env['ir.config_parameter'].sudo().get_param('runbot.runbot_message'), + 'pending_total': pending_count, + 'pending_level': level, + 'scheduled_count': scheduled_count, + 'hosts_data': request.env['runbot.host'].search([]), } + if project: + domain = [('last_batch', '!=', False), ('project_id', '=', project.id), ('no_build', '=', False)] + + filter_mode = request.httprequest.cookies.get('filter_mode', False) 
+ if filter_mode == 'sticky': + domain.append(('sticky', '=', True)) + elif filter_mode == 'nosticky': + domain.append(('sticky', '=', False)) - build_ids = [] - if repo: - domain = [('repo_id', '=', repo.id)] if search: - search_domain = [] - for to_search in search.split("|"): - search_domain = ['|', '|', '|'] + search_domain - search_domain += [('dest', 'ilike', to_search), ('subject', 'ilike', to_search), ('branch_id.branch_name', 'ilike', to_search)] - domain += search_domain[1:] - domain = expression.AND([domain, [('hidden', '=', False)]]) # don't display children builds on repo view - build_ids = build_obj.search(domain, limit=100) - branch_ids, build_by_branch_ids = [], {} + search_domains = [] + pr_numbers = [] + for search_elem in search.split("|"): + if search_elem.isnumeric(): + pr_numbers.append(int(search_elem)) + else: + search_domains.append([('name', 'like', search_elem)]) + if pr_numbers: + res = request.env['runbot.branch'].search([('name', 'in', pr_numbers)]) + if res: + search_domains.append([('id', 'in', res.mapped('bundle_id').ids)]) + search_domain = expression.OR(search_domains) + print(search_domain) + domain = expression.AND([domain, search_domain]) - if build_ids: - branch_query = """ - SELECT br.id FROM runbot_branch br INNER JOIN runbot_build bu ON br.id=bu.branch_id WHERE bu.id in %s - ORDER BY bu.sequence DESC - """ - sticky_dom = [('repo_id', '=', repo.id), ('sticky', '=', True)] - sticky_branch_ids = [] if search else branch_obj.search(sticky_dom).sorted(key=lambda b: (b.branch_name == 'master', b.id), reverse=True).ids - request._cr.execute(branch_query, (tuple(build_ids.ids),)) - branch_ids = uniq_list(sticky_branch_ids + [br[0] for br in request._cr.fetchall()]) + e = expression.expression(domain, request.env['runbot.bundle']) + where_clause, where_params = e.to_sql() - build_query = """ - SELECT - branch_id, - max(case when br_bu.row = 1 then br_bu.build_id end), - max(case when br_bu.row = 2 then br_bu.build_id end), - max(case when br_bu.row = 3 then br_bu.build_id end), - max(case when br_bu.row = 4 then br_bu.build_id end) - FROM ( - SELECT - br.id AS branch_id, - bu.id AS build_id, - row_number() OVER (PARTITION BY branch_id) AS row - FROM - runbot_branch br INNER JOIN runbot_build bu ON br.id=bu.branch_id - WHERE - br.id in %s AND (bu.hidden = 'f' OR bu.hidden IS NULL) - GROUP BY br.id, bu.id - ORDER BY br.id, bu.id DESC - ) AS br_bu - WHERE - row <= 4 - GROUP BY br_bu.branch_id; - """ - request._cr.execute(build_query, (tuple(branch_ids),)) - build_by_branch_ids = { - rec[0]: [r for r in rec[1:] if r is not None] for rec in request._cr.fetchall() - } + env.cr.execute(""" + SELECT id FROM runbot_bundle + WHERE {where_clause} + ORDER BY + (case when sticky then 1 when sticky is null then 2 else 2 end), + case when sticky then version_number end collate "C" desc, + last_batch desc + LIMIT 40""".format(where_clause=where_clause), where_params) + bundles = env['runbot.bundle'].browse([r[0] for r in env.cr.fetchall()]) - branches = branch_obj.browse(branch_ids) - build_ids = flatten(build_by_branch_ids.values()) - build_dict = {build.id: build for build in build_obj.browse(build_ids)} + category_id = int(request.httprequest.cookies.get('category') or 0) or request.env['ir.model.data'].xmlid_to_res_id('runbot.default_category') - def branch_info(branch): - return { - 'branch': branch, - 'builds': [build_dict[build_id] for build_id in build_by_branch_ids.get(branch.id) or []] - } + trigger_display = request.httprequest.cookies.get('trigger_display_%s' % 
project.id, None) + if trigger_display is not None: + trigger_display = [int(td) for td in trigger_display.split('-') if td] + bundles = bundles.with_context(category_id=category_id) + triggers = env['runbot.trigger'].search([('project_id', '=', project.id)]) context.update({ - 'branches': [branch_info(b) for b in branches], - 'qu': QueryURL('/runbot/repo/' + slug(repo), search=search, refresh=refresh), - 'fqdn': fqdn(), + 'active_category_id': category_id, + 'bundles': bundles, + 'project': project, + 'triggers': triggers, + 'trigger_display': trigger_display, }) - # consider host gone if no build in last 100 - build_threshold = max(build_ids or [0]) - 100 - context.update({'message': request.env['ir.config_parameter'].sudo().get_param('runbot.runbot_message')}) - return request.render('runbot.repo', context) + res = request.render('runbot.bundles', context) + return res @route([ + '/runbot/bundle/', + '/runbot/bundle//page/' + ], website=True, auth='public', type='http') + def bundle(self, bundle=None, page=1, limit=50, **kwargs): + domain = [('bundle_id', '=', bundle.id), ('hidden', '=', False)] + batch_count = request.env['runbot.batch'].search_count(domain) + pager = request.website.pager( + url='/runbot/bundle/%s' % bundle.id, + total=batch_count, + page=page, + step=50, + ) + batchs = request.env['runbot.batch'].search(domain, limit=limit, offset=pager.get('offset', 0), order='id desc') + + context = { + 'bundle': bundle, + 'batchs': batchs, + 'pager': pager, + 'project': bundle.project_id, + 'title': 'Bundle %s' % bundle.name + } + + return request.render('runbot.bundle', context) + + @o_route([ + '/runbot/bundle//force', + '/runbot/bundle//force/', + ], type='http', auth="user", methods=['GET', 'POST'], csrf=False) + def force_bundle(self, bundle, auto_rebase=False, **post): + _logger.info('user %s forcing bundle %s', request.env.user.name, bundle.name) # user must be able to read bundle + batch = bundle.sudo()._force(auto_rebase=auto_rebase) + return werkzeug.utils.redirect('/runbot/batch/%s' % batch.id) + + @route(['/runbot/batch/'], website=True, auth='public', type='http') + def batch(self, batch_id=None, **kwargs): + batch = request.env['runbot.batch'].browse(batch_id) + context = { + 'batch': batch, + 'project': batch.bundle_id.project_id, + 'title': 'Batch %s (%s)' % (batch.id, batch.bundle_id.name) + } + return request.render('runbot.batch', context) + + @o_route(['/runbot/batch/slot//build'], auth='user', type='http') + def slot_create_build(self, slot=None, **kwargs): + build = slot.sudo()._create_missing_build() + return werkzeug.utils.redirect('/runbot/build/%s' % build.id) + + @route(['/runbot/commit/'], website=True, auth='public', type='http') + def commit(self, commit=None, **kwargs): + status_list = request.env['runbot.commit.status'].search([('commit_id', '=', commit.id)], order='id desc') + last_status_by_context = dict() + for status in status_list: + if status.context in last_status_by_context: + continue + last_status_by_context[status.context] = status + context = { + 'commit': commit, + 'project': commit.repo_id.project_id, + 'reflogs': request.env['runbot.ref.log'].search([('commit_id', '=', commit.id)]), + 'status_list': status_list, + 'last_status_by_context': last_status_by_context, + 'title': 'Commit %s' % commit.name[:8] + } + return request.render('runbot.commit', context) + + @o_route(['/runbot/commit/resend/'], website=True, auth='user', type='http') + def resend_status(self, status_id=None, **kwargs): + CommitStatus = 
request.env['runbot.commit.status'] + status = CommitStatus.browse(status_id) + if not status.exists(): + raise NotFound() + last_status = CommitStatus.search([('commit_id', '=', status.commit_id.id), ('context', '=', status.context)], order='id desc', limit=1) + if status != last_status: + raise Forbidden("Only the last status can be resent") + if last_status.sent_date and (datetime.datetime.now() - last_status.sent_date).seconds > 60: # ensure at least 60sec between two resend + new_status = status.sudo().copy() + new_status.description = 'Status resent by %s' % request.env.user.name + new_status._send() + _logger.info('github status %s resent by %s', status_id, request.env.user.name) + return werkzeug.utils.redirect('/runbot/commit/%s' % status.commit_id.id) + + @o_route([ '/runbot/build//', - '/runbot/build///', ], type='http', auth="public", methods=['POST'], csrf=False) - def build_force(self, build_id, operation, exact=0, search=None, **post): + def build_operations(self, build_id, operation, **post): build = request.env['runbot.build'].sudo().browse(build_id) - if operation == 'force': - build = build._force(exact=bool(exact)) + if operation == 'rebuild': + build = build._rebuild() elif operation == 'kill': build._ask_kill() elif operation == 'wakeup': build._wake_up() - qs = '' - if search: - qs = '?' + werkzeug.urls.url_encode({'search': search}) - return werkzeug.utils.redirect(build.build_url + qs) + return werkzeug.utils.redirect(build.build_url) @route(['/runbot/build/'], type='http', auth="public", website=True) def build(self, build_id, search=None, **post): """Events/Logs""" Build = request.env['runbot.build'] - Logging = request.env['ir.logging'] build = Build.browse([build_id])[0] if not build.exists(): return request.not_found() - show_rebuild_button = Build.search([('branch_id', '=', build.branch_id.id), ('parent_id', '=', False)], limit=1) == build - context = { - 'repo': build.repo_id, 'build': build, - 'fqdn': fqdn(), - 'br': {'branch': build.branch_id}, - 'show_rebuild_button': show_rebuild_button, + 'default_category': request.env['ir.model.data'].xmlid_to_res_id('runbot.default_category'), + 'project': build.params_id.trigger_id.project_id, + 'title': 'Build %s' % build.id } return request.render("runbot.build", context) - @route(['/runbot/quick_connect/'], type='http', auth="public", website=True) - def fast_launch(self, branch, **post): - """Connect to the running Odoo instance""" - Build = request.env['runbot.build'] - domain = [('branch_id', '=', branch.id), ('config_id', '=', branch.config_id.id)] + @route([ + '/runbot/branch/', + ], website=True, auth='public', type='http') + def branch(self, branch=None, **kwargs): + pr_branch = branch.bundle_id.branch_ids.filtered(lambda rec: not rec.is_pr and rec.id != branch.id and rec.remote_id.repo_id == branch.remote_id.repo_id)[:1] + branch_pr = branch.bundle_id.branch_ids.filtered(lambda rec: rec.is_pr and rec.id != branch.id and rec.remote_id.repo_id == branch.remote_id.repo_id)[:1] + context = { + 'branch': branch, + 'project': branch.remote_id.repo_id.project_id, + 'title': 'Branch %s' % branch.name, + 'pr_branch': pr_branch, + 'branch_pr': branch_pr + } - # Take the 10 lasts builds to find at least 1 running... 
Else no luck - builds = Build.search(domain, order='sequence desc', limit=10) + return request.render('runbot.branch', context) - if builds: - last_build = False - for build in builds: - if build.real_build.local_state == 'running': - last_build = build.real_build - break - - if not last_build: - # Find the last build regardless the state to propose a rebuild - last_build = builds[0] - - if last_build.local_state != 'running': - url = "/runbot/build/%s?ask_rebuild=1" % last_build.id - else: - url = "http://%s/web/login?db=%s-all&login=admin&redirect=/web?debug=1" % (last_build.domain, last_build.dest) - else: - return request.not_found() - return werkzeug.utils.redirect(url) - - @route(['/runbot/dashboard'], type='http', auth="public", website=True) - def dashboard(self, refresh=None): - cr = request.cr - RB = request.env['runbot.build'] - repos = request.env['runbot.repo'].search([]) # respect record rules - - cr.execute("""SELECT bu.id - FROM runbot_branch br - JOIN LATERAL (SELECT * - FROM runbot_build bu - WHERE bu.branch_id = br.id - ORDER BY id DESC - LIMIT 3 - ) bu ON (true) - JOIN runbot_repo r ON (r.id = br.repo_id) - WHERE br.sticky - AND br.repo_id in %s - ORDER BY r.sequence, r.name, br.branch_name, bu.id DESC - """, [tuple(repos._ids)]) - - builds = RB.browse(map(operator.itemgetter(0), cr.fetchall())) - - count = RB.search_count + @route([ + '/runbot/glances', + '/runbot/glances/' + ], type='http', auth='public', website=True) + def glances(self, project_id=None, **kwargs): + project_ids = [project_id] if project_id else request.env['runbot.project'].search([]).ids # search for access rights + bundles = request.env['runbot.bundle'].search([('sticky', '=', True), ('project_id', 'in', project_ids)]) pending = self._pending() qctx = { - 'refresh': refresh, - 'host_stats': [], 'pending_total': pending[0], 'pending_level': pending[1], - } - - repos_values = qctx['repo_dict'] = OrderedDict() - for build in builds: - repo = build.repo_id - branch = build.branch_id - r = repos_values.setdefault(repo.id, {'branches': OrderedDict()}) - if 'name' not in r: - r.update({ - 'name': repo.name, - 'base': repo.base, - 'testing': count([('repo_id', '=', repo.id), ('local_state', '=', 'testing')]), - 'running': count([('repo_id', '=', repo.id), ('local_state', '=', 'running')]), - 'pending': count([('repo_id', '=', repo.id), ('local_state', '=', 'pending')]), - }) - b = r['branches'].setdefault(branch.id, {'name': branch.branch_name, 'builds': list()}) - b['builds'].append(build) - - # consider host gone if no build in last 100 - build_threshold = max(builds.ids or [0]) - 100 - for result in RB.read_group([('id', '>', build_threshold)], ['host'], ['host']): - if result['host']: - qctx['host_stats'].append({ - 'fqdn': fqdn(), - 'host': result['host'], - 'testing': count([('local_state', '=', 'testing'), ('host', '=', result['host'])]), - 'running': count([('local_state', '=', 'running'), ('host', '=', result['host'])]), - }) - - return request.render("runbot.sticky-dashboard", qctx) - - def _glances_ctx(self): - repos = request.env['runbot.repo'].search([]) # respect record rules - default_config_id = request.env.ref('runbot.runbot_build_config_default').id - query = """ - SELECT split_part(r.name, ':', 2), - br.branch_name, - (array_agg(bu.global_result order by bu.id desc))[1] - FROM runbot_build bu - JOIN runbot_branch br on (br.id = bu.branch_id) - JOIN runbot_repo r on (r.id = br.repo_id) - WHERE br.sticky - AND br.repo_id in %s - AND (bu.hidden = 'f' OR bu.hidden IS NULL) - AND ( - 
bu.global_state in ('running', 'done') - ) - AND bu.global_result not in ('skipped', 'manually_killed') - AND (bu.config_id = r.repo_config_id - OR bu.config_id = br.branch_config_id - OR bu.config_id = %s) - GROUP BY 1,2,r.sequence,br.id - ORDER BY r.sequence, (br.branch_name='master'), br.id - """ - cr = request.env.cr - cr.execute(query, (tuple(repos.ids), default_config_id)) - ctx = OrderedDict() - for row in cr.fetchall(): - ctx.setdefault(row[0], []).append(row[1:]) - return ctx - - @route('/runbot/glances', type='http', auth='public', website=True) - def glances(self, refresh=None): - glances_ctx = self._glances_ctx() - pending = self._pending() - qctx = { - 'refresh': refresh, - 'pending_total': pending[0], - 'pending_level': pending[1], - 'glances_data': glances_ctx, + 'bundles': bundles, + 'title': 'Glances' } return request.render("runbot.glances", qctx) @route(['/runbot/monitoring', - '/runbot/monitoring/', - '/runbot/monitoring//'], type='http', auth='user', website=True) - def monitoring(self, config_id=None, view_id=None, refresh=None, **kwargs): - glances_ctx = self._glances_ctx() + '/runbot/monitoring/', + '/runbot/monitoring//'], type='http', auth='user', website=True) + def monitoring(self, category_id=None, view_id=None, **kwargs): pending = self._pending() hosts_data = request.env['runbot.host'].search([]) - - last_monitored = None - - monitored_config_id = config_id or int(request.env['ir.config_parameter'].sudo().get_param('runbot.monitored_config_id', 1)) - request.env.cr.execute("""SELECT DISTINCT ON (branch_id) branch_id, id FROM runbot_build - WHERE config_id = %s - AND global_state in ('running', 'done') - AND branch_id in (SELECT id FROM runbot_branch where sticky='t') - AND local_state != 'duplicate' - AND hidden = false - ORDER BY branch_id ASC, id DESC""", [int(monitored_config_id)]) - last_monitored = request.env['runbot.build'].browse([r[1] for r in request.env.cr.fetchall()]) - - config = request.env['runbot.build.config'].browse(monitored_config_id) + if category_id: + category = request.env['runbot.category'].browse(category_id) + assert category.exists() + else: + category = request.env.ref('runbot.nightly_category') + category_id = category.id + bundles = request.env['runbot.bundle'].search([('sticky', '=', True)]) # NOTE we dont filter on project qctx = { - 'config': config, - 'refresh': refresh, + 'category': category, 'pending_total': pending[0], 'pending_level': pending[1], 'scheduled_count': pending[2], - 'glances_data': glances_ctx, + 'bundles': bundles, 'hosts_data': hosts_data, - 'last_monitored': last_monitored, # nightly 'auto_tags': request.env['runbot.build.error'].disabling_tags(), 'build_errors': request.env['runbot.build.error'].search([('random', '=', True)]), - 'kwargs': kwargs + 'kwargs': kwargs, + 'title': 'monitoring' } - return request.render(view_id if view_id else config.monitoring_view_id.id or "runbot.monitoring", qctx) + return request.render(view_id if view_id else "runbot.monitoring", qctx) - @route(['/runbot/config/', - '/runbot/config/'], type='http', auth="public", website=True) - def config(self, config_id=None, config_name=None, refresh=None, **kwargs): + @route(['/runbot/errors', + '/runbot/errors/'], type='http', auth='user', website=True) + def build_errors(self, error_id=None, **kwargs): + build_errors = request.env['runbot.build.error'].search([('random', '=', True), ('parent_id', '=', False), ('responsible', '!=', request.env.user.id)]).filtered(lambda rec: len(rec.children_build_ids) > 1) + assigned_errors = 
request.env['runbot.build.error'].search([('responsible', '=', request.env.user.id)]) + build_errors = build_errors.sorted(lambda rec: (rec.last_seen_date.date(), rec.build_count), reverse=True) + assigned_errors = assigned_errors.sorted(lambda rec: (rec.last_seen_date.date(), rec.build_count), reverse=True) + build_errors = assigned_errors + build_errors - if config_id: - monitored_config_id = config_id - else: - config = request.env['runbot.build.config'].search([('name', '=', config_name)], limit=1) - if config: - monitored_config_id = config.id - else: - raise UserError('Config name not found') - - readable_repos = request.env['runbot.repo'].search([]) - request.env.cr.execute("""SELECT DISTINCT ON (branch_id) branch_id, id FROM runbot_build - WHERE config_id = %s - AND global_state in ('running', 'done') - AND branch_id in (SELECT id FROM runbot_branch where sticky='t' and repo_id in %s) - AND local_state != 'duplicate' - AND hidden = false - ORDER BY branch_id ASC, id DESC""", [int(monitored_config_id), tuple(readable_repos.ids)]) - last_monitored = request.env['runbot.build'].browse([r[1] for r in request.env.cr.fetchall()]) - - config = request.env['runbot.build.config'].browse(monitored_config_id) qctx = { - 'config': config, - 'refresh': refresh, - 'last_monitored': last_monitored, # nightly - 'kwargs': kwargs + 'build_errors': build_errors, + 'title': 'Build Errors' } - return request.render(config.monitoring_view_id.id or "runbot.config_monitoring", qctx) - - @route(['/runbot/branch/', '/runbot/branch//page/'], website=True, auth='public', type='http') - def branch_builds(self, branch_id=None, search='', page=1, limit=50, refresh='', **kwargs): - """ list builds of a runbot branch """ - domain =[('branch_id','=',branch_id), ('hidden', '=', False)] - builds_count = request.env['runbot.build'].search_count(domain) - pager = request.website.pager( - url='/runbot/branch/%s' % branch_id, - total=builds_count, - page=page, - step=50, - ) - builds = request.env['runbot.build'].search(domain, limit=limit, offset=pager.get('offset',0)) - - context = {'pager': pager, 'builds': builds, 'repo': request.env['runbot.branch'].browse(branch_id).repo_id} - return request.render("runbot.branch", context) + return request.render('runbot.build_error', qctx) diff --git a/runbot/controllers/hook.py b/runbot/controllers/hook.py index 81cffcbb..055a17a9 100644 --- a/runbot/controllers/hook.py +++ b/runbot/controllers/hook.py @@ -4,39 +4,63 @@ import time import json import logging -from odoo import http, tools +from odoo import http from odoo.http import request _logger = logging.getLogger(__name__) -class RunbotHook(http.Controller): - @http.route(['/runbot/hook/', '/runbot/hook/org'], type='http', auth="public", website=True, csrf=False) - def hook(self, repo_id=None, **post): +class Hook(http.Controller): + + @http.route(['/runbot/hook/', '/runbot/hook/org'], type='http', auth="public", website=True, csrf=False) + def hook(self, remote_id=None, **post): event = request.httprequest.headers.get("X-Github-Event") payload = json.loads(request.params.get('payload', '{}')) - if repo_id is None: + if remote_id is None: repo_data = payload.get('repository') if repo_data and event in ['push', 'pull_request']: - repo_domain = [ + remote_domain = [ '|', '|', ('name', '=', repo_data['ssh_url']), ('name', '=', repo_data['clone_url']), ('name', '=', repo_data['clone_url'].rstrip('.git')), ] - repo = request.env['runbot.repo'].sudo().search( - repo_domain, limit=1) - repo_id = repo.id + remote = 
request.env['runbot.remote'].sudo().search( + remote_domain, limit=1) + remote_id = remote.id - repo = request.env['runbot.repo'].sudo().browse([repo_id]) + remote = request.env['runbot.remote'].sudo().browse([remote_id]) - # force update of dependencies to in case a hook is lost + # force update of dependencies too in case a hook is lost if not payload or event == 'push' or (event == 'pull_request' and payload.get('action') in ('synchronize', 'opened', 'reopened')): - (repo | repo.dependency_ids).set_hook_time(time.time()) - elif event == 'pull_request' and payload and payload.get('action', '') == 'edited' and 'base' in payload.get('changes'): - # handle PR that have been re-targeted + remote.repo_id.set_hook_time(time.time()) + elif event == 'pull_request': pr_number = payload.get('pull_request', {}).get('number', '') - branch = request.env['runbot.branch'].sudo().search([('repo_id', '=', repo.id), ('name', '=', 'refs/pull/%s' % pr_number)]) - branch._get_branch_infos(payload.get('pull_request', {})) + branch = request.env['runbot.branch'].sudo().search([('remote_id', '=', remote.id), ('name', '=', pr_number)]) + if payload and payload.get('action', '') == 'edited' and 'base' in payload.get('changes'): + # handle PR that have been re-targeted + branch._compute_branch_infos(payload.get('pull_request', {})) + _logger.info('retargeting %s to %s', branch.name, branch.target_branch_name) + base = request.env['runbot.bundle'].search([ + ('name', '=', branch.target_branch_name), + ('is_base', '=', True), + ('project_id', '=', branch.remote_id.repo_id.project_id.id) + ]) + if base: + _logger.info('Changing base of bundle %s to %s(%s)', branch.bundle_id, base.name, base.id) + branch.bundle_id.defined_base_id = base.id + # TODO remove all ci + + elif payload.get('action') in ('deleted', 'closed'): + _logger.info('Closing pr %s', branch.name) + branch.alive = False + else: + _logger.debug('Ignoring unsupported pull request operation %s %s', event, payload.get('action', '')) + elif event == 'delete': + if payload.get('ref_type') == 'branch': + branch_ref = payload.get('ref') + _logger.info('Hook for branch deletion %s in repo %s', branch_ref, remote.repo_id.name) + branch = request.env['runbot.branch'].sudo().search([('remote_id', '=', remote.id), ('name', '=', branch_ref)]) + branch.alive = False else: _logger.debug('Ignoring unsupported hook %s %s', event, payload.get('action', '')) return "" diff --git a/runbot/data/build_parse.xml b/runbot/data/build_parse.xml index 66a47741..79903ae8 100644 --- a/runbot/data/build_parse.xml +++ b/runbot/data/build_parse.xml @@ -2,7 +2,7 @@ Parse build logs - + ir.actions.server code @@ -12,7 +12,7 @@ Parse log entry - + ir.actions.server code diff --git a/runbot/data/runbot_build_config_data.xml b/runbot/data/runbot_build_config_data.xml index 6a903831..1d766233 100644 --- a/runbot/data/runbot_build_config_data.xml +++ b/runbot/data/runbot_build_config_data.xml @@ -3,7 +3,7 @@ base - base + -*,base 600 @@ -12,7 +12,7 @@ all - * + 20 @@ -27,19 +27,17 @@ Default - Default no run - @@ -54,7 +52,7 @@ coverage - * + 7000 @@ -75,7 +73,6 @@ 20 - @@ -84,10 +81,10 @@ - + l10n - * + 30 @@ -104,7 +101,7 @@ clickall - * + 5400 diff --git a/runbot/data/runbot_data.xml b/runbot/data/runbot_data.xml new file mode 100644 index 00000000..1d91579a --- /dev/null +++ b/runbot/data/runbot_data.xml @@ -0,0 +1,94 @@ + + + Default + gear + + + Nightly + moon-o + + + Weekly + tasks + + + + R&D + + + + master + True + + + + Dummy + True + + + + + runbot.runbot_is_base_regex + 
^((master)|(saas-)?\d+\.\d+)$
+
+
+
+ Mark is base
+
+
+ ir.actions.server
+ code
+
+ records.write({'is_base': True})
+
+
+
+ Mark no build
+
+
+ ir.actions.server
+ code
+
+ records.write({'no_build': True})
+
+
+
+ Mark build
+
+
+ ir.actions.server
+ code
+
+ records.write({'no_build': False})
+
+
+
+
+ Runbot
+ 10
+ seconds
+ -1
+
+
+ model._cron()
+ code
+
+
+
+
+ Base, staging and tmp management
+
+ on_create
+
+ code
+
+if record.name.startswith('tmp.'):
+    record['no_build'] = True
+elif record.name.startswith('staging.'):
+    name = record.name.replace('staging.', '')
+    base = record.env['runbot.bundle'].search([('name', '=', name), ('project_id', '=', record.project_id.id), ('is_base', '=', True)], limit=1)
+    if base:
+        record['defined_base_id'] = base
+
+
+
diff --git a/runbot/documentation/images/repo_odoo.png b/runbot/documentation/images/repo_odoo.png
new file mode 100644
index 0000000000000000000000000000000000000000..a7eb3afaa16bb6c477cfc774eabca6422358ecb6
GIT binary patch
literal 36084
diff --git a/runbot/documentation/images/repo_runbot.png b/runbot/documentation/images/repo_runbot.png
new file mode 100644
index 0000000000000000000000000000000000000000..acb3524b2f973736b06e3dccc5fb1296ee44c8c4
GIT binary patch
literal 35062
zBBc5iU+H`f+3Ykdm)vWbjRH2^GLJ%TT~u(G)Gxj8gbu8?m~a+k45wc(FkG0>;bEY9 zLf|J+dWw4`uUDs(PIF=vH|4J22^lx8(D^;3TKGwsc!|!jO=_zL*Dn&%p@K-G(kaqI3P?9dhk|s&&@BR@bm!0lQqm0~-O>%x3_YYn%$xxp zectc;p6@;1xvq1r!$0@zJN91p+Iz3{Tfen#LX;JyaIr|SkdTmYWuzrkk&sZGk&y0~ zVcbQWVW@N_MI2C_#AMVl5Px16Z$BW~B*2$iKs7s4pz9k)6C^WRI~x;LCu2tw6I&;9 zJK#P_i!c%rHIj_P3w5{Doq6Ba>T79dS2Yx7sAirVVCQNYxvw?-QL>Ik`)c zDBYnPYR=`xAQ3TCl{?&#q0NTACWFo;PR(ZbVFxSD-b44ohN_RLWuj?hqH{v+XNX%w z1t~YWIpK$6{v|K3&(g@CFHIZTWaZ_#dJmf2t$lcNFO4@wgUjv{0fAX%shhhPM~{z> zA8kqBG)$s*cC42nwVB~J9jMZQ!C;5nzZ#!Ix>iodqHeDAay4Jc-ae0FVrEuhHOVGU zaVw1c+x(|DzhYnP6#Vt8703DJvFPXBU*EKS=voN^+&nCk)csqGfc6!KAY3xj-w%iU zhexDo?*Dn@b6#H8@26sD?qil$RN(&ZUPeg?|FzOzl7vR5r6~wf^8S`t;&&G$?V!KL zC+Yv)Q@~p@vlwZbKcj}&wcLu>EBfc9U;o<d)OdpqOCJ(I{hX5bL9MvkPt@mJ@Z;zyCl0wVoQqSs7RQJj;l)G} zB0F;vRn|CZ3fe3{Y0H>vQ7r7dg`9TU)pLn?vAeSGelm!&YRD`*Y_X`P0FE|93ad|(f64vjVd8c~&t|M>8OEU*2+1M5< zdf(vm*ZSbvuvg!CN_~QLBGULNj!ehGms5H#5mQ}XUj|}tUhj={#Ivd@oc-a{*HCV| zkU*&F-E;-L8o`|pqnAI7EJ4;;tawYC3?G|yWX5B&TuZ-5+nGY+Ccj!Ti?V9!{g4>H zs)=f_zV3v>!c;h{Z@atL!w=g)tNpS`W2+KweGl75eQ4rsqRNJRHdpienJ_fHs-1^) zZzkqon)tZ)fBol>Y-$z?}!OMK7GZ~(D>oAfZ12C?zbNW$jo;? zj7DLo={P@#>uAVs0j8du87R$ny0|pm%iLVn?ysj#Cm`^(<^L2vJ_5(ni7xz1 zI1Kqg{?O(_%$ysp=4IJOPc~-Cd^_>$WqHgPB~8=a(*y1T0qmTIhap&{z{c;)y1Lp8 zB_VvznBvhzw?C$6AH)nu47EfFH1noi78fSL1GpQJQN=~;!%oQBpo8-es$_R z6r92my%1kqTQ^Yvrt9>~rI>QK%SQ(h@g6BfRJJq(iBcL~DAEQ#eyz@5(PF4z0USD+ z6r6w2bKJ)O|1QkT1kkA1Oy4%oj+Ch=OnPMVcHoE+0RyXW2C@Zbsaw5(XYI;7TSG~7gMwGS;DxkNysZmGK6UL9vVynWAig&p5@uM_2Y&c zHNpBR$>wq|7TPW3*?NLGvo66&bQ(6n*?6Xxbsjp=1Px!`mA?~w#2xiLo#pk_kZwM} z$WvoO>C%u&BA3 z^ISGuUtG?LctvDXdi&r$6SnIjtxoT>pgbO)KhHZQ)=#Vha z+iTWIR;5=HHb!=TOx^TJKkXaYNk81wcpnNT%uxdZMRBMj#eq|ML|o!d80h}ID>S1N zJF-aHDvlQqr`|h#xa&X2*Dw<4`YbFFMa5C78SZ=T>g*DQoHd*}zp)<8Nj~LczXF+G zv0*M*`i|}yk*oUc6mQv^to?i4s?QW&BjBs(`-4_GfT(%5$_2U1E|h~=wszl+k$~=g zPzi56Sl0CgUvO#@Y%ojvaWp&q8Hn}T+aQdMZ>FN6upyu@mVsc~I&*D-&N6{p`8-Ya z$!cT_8fstd&V&WwUKN=6IXRCV(n!PRhde(5pbLTTgYK~Vc&pJdxHQYaoQ&5DKk7cY z@kXXxl)!&1wBXwskxi#zwXf)l5{^*p%(x${44dOUo_XX9VeJ>9_bkTef z#`D3FurI#8-RIM@Lf@LV#68D*Ryi?1+Q;|t?mtFOYnX~#PvfoY5KwzxQI3B;a^qc2 z5@7oU6$$qYi&x)0YUdqOz7WGgMvDa{VW+WaEmTUl6I7@c?*n#%+-kZV>?P81417JKnp7aMn| z>adp#y@c0MmILjvxuIrTZztWB=9qHLm3XLc+57L$tZnq<$|QaUQ|Zr)z; z3OoOb%r8+94QwReznYsCZTr01I<&YTeP!qAYgSNwH`yReMT_9Es+QQB#;E%E`Ie!y zlQALu4zpC8V^hh3oEEE_iCJ=%7NbB$>~mp|d3``VE%S70&^r=dj*CV?0HuC+>h?p` zNsf0TA%@xJ=8LI`XPf=k7a8UU3c75twS7F&;oXO+!ziqj+n&{bN08DmOYXAt7PR zMIThp8%nEp*d!_GRrv8R>KNwJq9)cgVpY33B3HXwFNNW=0$>`#xTAt4hqdW;K+0BU z!wM0Z~%LL09Dy6+3+QLaJdj7=}9x@iOm7nH{Tc8}P)x%X% z-&k}u{|cQn;Rb~O`tNp}GFuqh-l86G)9HebvpFfP+)Y>D$mf3mNr=Qd1W5c1XZ{84 zA#j+?zreJ=ksbMW5WrzV&sTWEkT2rA1lDp6%h}p+T7qtHZVYSU*@`tLapO-^@Q+`V zLH9^feOrmf8iAXZu{VNk;Y;BG?_4ngWzi~1n4g*f2VbPYdxxyFFJFHsY}SzU!8jZ$ zG~ENgI1CqP54^>s-Vz8Pb51+vzGn6*5uWW)P1n{wSnzWd?sMigiCw7e1-B%vX@ATt z+0!wuo}QqDv1zH@qc1o4qmovUiI8@G45DtppSH{p=@S@aBA2f}Ddu@UtL#1#TX9wv zwb*36`{h6yE1_aB&2a^3vQkoCIr~JJ{R@<`B=~om4f^~#FTBDU ze$QSLbr3glb+PFn5qD*}6Hx+wfiYBR_5(i2%jueR)vLIBq8|s8Sd1XX3YEL^Vtf(r z%0_FqqE#hP&sTJBXEoiF%J2pQ#V(C`^{QlS0*)6c|xSK(0HWFsZn* zI_P29>`Hu+Mu3H!WA5%K)LN>p>aGYA4n)Je>lZFbTHnceHldp@$oR6)7+Y&5ad|bQ zA`$q6fZ_FsLe0VMsQ5*HM1@PMtM=s)pF8p3^5<=~Y`!1gx6pXiMw8#o=h2gC4nHe; z_vH&D)T1J-KDUai@s722&-dJucBLf7cYBZb14CS8TWj+xL*t{-@23KZVu@B;^Fs(>Z|`jTE{KgYzEHKu zs7PD9n$#%AZs}BD{(QDh3)a}$o|OsgOBBowUCvZ~_FQth+Sbv(S)EqzdDyhtMd6u^ z$k+ThP&z4sL-+F=-CVzLItng=RC_V7rmHf;8$fn>LU=i@BJjTAO51_V&AtjP9*#qY z7Taq(5KWUo<*^04b*A(FFR2q!8NLdPh3r$>={n#N3r~5n#35I^65z*jHhJ8KMb#-kX6-FjyLC8PYCrhW}FP$80OT7)|5F&sou;cUvU7b!S~CV+-0+ ztOVg^>(uvn&@9MF<~cf&OWg6S2MphA&$J0s4$2QReGq 
zy}i`LEGsKA)}i9hrrH$T_=HZUsj8>*JG3`?J$~c00|G>;eF_Q8@YcsIOt;GNd5P;h z>)Ew{{KSwY^BImV+2qJbREF^=(?i&(mu33?I~%J+-V-r|t;cPdO?vWZfEIxI=swLO zri|l0_XEiX%n9S8bCp3IHSDh;A0_Wo(cP1&IoOQm=Heb#J5qkYm_?XQiSjdF8DnJE zy2#GxXmG4yBTO&_Vu;gbr&;l9m@YYGqTKD}(;nqeh#@22Pn(7<;%CMqIO{@VVp2@f zgJmk;?_j+AehX=7z)!bpK!8w|yz`NPe!~grfp-Ihm-`m6=`*ssM+FN+&)Vpfy=_uE zM0DDs-JPG&B)lsJ%To#)N_7;|!q7z_?LMdu*Dam4O)8t?B}MUhYI+Zi*8x6^9J zReHgtA-VSIRqV?%>lQ$UJ%?_tP=hXszqVNyrTdIQsx#W*ao1UIV@Cg6l(#}1${!k;-zpr0M@HfjQ2j>)GXwhuL zt!bztuIo{1ES|j@#-C!5prHyld}?r(1l4o1LdkBvVxQx8XzyYwYx1F0Si)2%loFY6 z=IYKdT#teu;V9Vcc^`1%^n@Q08w!UV4Sl%3BQQsi2N|XV?IMjlJM8oASzRtVYZ){X zZZoUMZtqO}#&0Y#R3=4Hf`03qzugZ(<>%+WBG@|aVKp7m#14sk0_(avVXHN8x!+3R zsu@l73u98imjg|2@OCtnVpwsz~WXtOb8sqsonmpU)MXIT$6({`k2d5zS zrC>AV_SMR^57@OxiE=1T6ZFl@&@-yz>?)jC=|WBst8vkJq;Ua8?*lpZ{rW37&1-ws zw{-*8<;D&N$hQOm3>#`w7pC4K52tbFyO)IovBs@d53JbjS!dc?(q^WQ|M2S7qin2C z)FidiDmM~h)m=fX%PA6~{>IAGcNhn=ulg4~a|Z|M;um$r=Rdnduih6h15~iF8cQHa zw}`cs(~gx`WeQ;qYhe;4XsB>Bg7_0yK6B#=@&q68DlUgNMO|Q$M%&pglH454nvMQyw1?A zLRvOFc8g#zFAX88)t#1x0A1LkShNzm5llSg&eY}40T!>Ez@Jl=w)HNg^sW5_L z9(t!8e>Uf_GA{_W`1I+;E|6*@`DO#1ADr9e4(jRy5u|{w=wYsub_FAvb|7Ima&;gQ zMuNQYY3viESyJ#?)nnsnPTsHZAAQ z7$P_|C^8wOt@0EOHjXt!0JF@<8E*!J<2qB)-43VjAej*Kn%;Wi*K< zDQN67?d3qAoJT~i|2GZ52=qEcTU zZmt-kJ?xa{xB{@OP%FjV)u;>KF7}xSW3dShkd2Jv#MG7H)yd?-`yo7&ZkW1@qmha2Kz8$_1TdQH;TUK+Zo9--KxEnJoz~kuP zzEs;xH|A<2V;G@H$3y6dc5ZBZtg6?{1uEczfn3CYPBis|*)kulz=?~`W~xB;ekf5C z+}w~O8Lvu;9pzExf1Uu4wzkSSJx$U6)xIx{hQ)B@NZB~t-fMEg#asLavWuCj^&~jX zK_G`Vm3;=N1!y-I92s!2xtfuy)x+O!XD)T z>CG5aEb*>$q6zlhkG2?$DTa@DrlTIC=(JgtHgW&tzHzL=`nTTbuRv0!_ixr6p_uSL zT=%(~kn>1NLER;(M$6@$6?(-L&@6pS%~0x^ra{}6LC9QXQS$P)t}mjuK29wcq17&I zXXLtA#V<57XpQrO=S0qxXxI$t>au%ge&&yd&yt)@bDesWHdJ3{+~ysQqrCcS3|}%l z(tL48wBCOJe`1)2D?QpyU6~MRqQYIU!t-Ui)%aviWS%dwWVW z<2;9P+T74*Z*Ng^z}%ayf(QSR9i0%)0~VPMi%bqnK8`Wfz-TqgDD>g@ zL}`_|{mbzS+?EuLy(p6d4^<4+zhvVf=l&i5U^H5{c1l+8fk(}=PoE2j{21d-&&I@$ zSlt(Ev8s+WC2yBTJeKKSIj)@98Se$wRlMrglaq{q$?4-6arZ0^HcKC47qAdjLdAg%OhAu{>{B=l@q)K z^a&O8@oEIjMu)tzn65l3lVdOKa!_#Cv$uM&di$_GmzH_^p=!~gy zd!EVL$6H%FPtYW?`J2#@>sE2lt5JYug%xCo-+PpFq_p&7yaF91gEWzEPfm!-P0z{* ze?T0`u{?*c)_g#7&5UJOU(Br67N{oGJk2PoMEQ6?z5L_ykH#a0ijj6Nq62?SjHV?UTWa3HfGN z;K3VT>uZgUWV$9Iw!hYDtPz&jkLHq~xG;sW()>(Y&qebOF-pud5(qyLueLb$t@mOA zk%Qtiu-D)i2BIr$Cr>4B{_LRNUtZTu`08_Uu~=H@bBB{|;PrwyD=_EXDmfV~xt}tP zbw;)i5Xa;}-=pdZQ1GO+`@Vgb+MFjGU;wBO-P$&Du-)TH)?izQh#k3VoDzEIx5=NNay9-abFtKDI4iC)9RT{?G?LIw@@fufK{8ye zv##%n^2sOZ36Jgb72|#1hHmZA{NzZft_MeT>swZ}usu34GY!Z2u`F>|{y7jc-SIIo z5#>O99@Xpl@I6<2JgUB;z)xCSXDNhf$`aZTb7}giYMcEd#kZF)O$gzoM+`k^3+Gg^ z2I`W-Q%;N&XggD#c=gb$ik5Zm@XSgta{6$}w#k-r+Hjwyr1SXIH(ok7Z?rUm@Z!k1 z{RStf6^qU+>EJWr*5fNm>?q!nkGis7LL|k%F&ACp5n!B^Xkdh9?C37$9_XrM-OI3I zon%J~3Ifm95UXd_pEO|7NwTnWT55?-5EkAS2)OTj!~sCfO8(0 zfs#dHoRNT|xI1oZ+}~KWu1(3Y4EIkV=glHBST45tBOO_j6BZq@Fp-iwOE%Z&i!0BwRwy-Avh* zr!*hiTC!ahxclo$L+pFF)(!L}2{F<%dYl*8p1RI5tzTV{KZKR1eE+oJI1hb#x84y< zjY`4^*(Ri@3SiZRT@DVbyV}+4t2?64&M$4b|E>i{PG4Tya3RP^k+_hf5gub63P`n= zyw^4&V7~C9zk1T;;3A#PWNE(7e9ryn^tDEE`neC7o&C%qKwBmd>`wMQEV{wN0$uKJrV=qQwq;EP(SvuzunmshdjF(%0K=lTe$RBR4qB9sjEv?HiZssSlEs5 zy?ojhgexFV)YLL*;A!*CD)L%m-e1kR06%Dt3;zYCP*3^X#^*T!JZ>I;rMAK1{{vX@ z50wDY2&(QWdOZO+dus_}S>F!N99IhG-EWvdO0kKBN1=&887E$*KRJ6^l>@G&0*`ip z0KHy_8$}j>pHk~Fx!bFiEk!BtgZ2i30H&uI>isT1_`8LM_Z@J}(Bwz$Z z|HF--$^gd<>_}l5o&}CLq9yDPHXhLm#;|bOjppvS$!ZSgj;dHxq#5_kebZr=VWtwH ziZChzZ{{A%jc5;;$mZ1aizh%PHFyg%iRaE9cJspbk12L$ZB)xZ+@6g_<`R>wf;Vp2 zVf6ck-;Go5H*Kte2FLoEJEk@z_nMa$-5}%CH3svo9g}dB4GLaS!=M|HBERN^0~t`o ztgj+m6q7H+&F<|5Cl%J7RKhDl}_s3=faDh8+0-Hp5B{TpPnt* 
z$J+DBlyg>@?VRWX@6nx4!}4#86<6Z4niPOMYg%G6<7#aoqJVf?Wb9$VucK}Axhvbl z(;%~H59>RDU7ZY2)!6v-;@PX9LP8rQyVu(g+Ef5J2GYDfjrT1o_s)WzGJ}q+I?*&0o;*Zqc4t*X%xzeL z$X!wU)g(Rl0kqXS?XnD*i@n7O-obYLb!i30c;E*&`3}4ai6<`<74f7Hl|08dK#q~- znVFdn9Yz;I^l2`PH&Dd_j_m(UG~q`n=xUrUoWzFl^syzB9!T){1%^#8esa)uPDBQ@T+D5lOo5Xqk z%#0!BKdI)UWDpJz^Tw1=xFF)P)Kv1{VVud|wg}PBpQ0y}KlMeCh>3S`ss3%s=ni)^ zd$oqmff;f)HXQu5;@?&j{oi=Ozsb;ltNp*N{drrw$na6pPrnKK_Stz9gIl{E&~X?} zws{|qM4ZB;=&8p7oxtFZRW)}T?_N*uNZzM^u-%B2XZ(AO`V0FgrP&nLoqr^q z8EFtf7keKOrlS5FBtxu3Epk}4|D$CDw?CaLJ!pj)nt(dDnR)m{qKo9%Bsbi=Q;p;QC@w9we&4+@mCn%&suY{41J3u=oVHztM%Q zA&Z`D^TzSy_)Pj{4srx~*%}KK0q$Ay| z+*I*#HF_}!)MOPcW0#k;-G73Jx8f&#N(jjJsjHVA0II9W2}iebL&FI21Cnj?1|?{*q&q#Tw)D%x}EAk(WznGst%7+d#7-yp_nDxm%WGklkQKjYef%B{zB+6YV z5vyIuPi8_-ZYMX?b(54<%E?1>m&SEC&Ng=`VV8jIvyss~sBv!+7f-^N-Q?)hohGnM z;>bs=kCJODf)0X#k2l?#lTHJYv4=VH)Sz)jvU%#tTK8gFRAg%_Lh3)4xRwlbL%cj?89NMY6I_8(BW}b_F!0BjNn0E zSwCrt_s^JH-a8UcC4cl~+XLsyJVf~515mlY`u0xibG8O&(m>B*{Es-n17v>3Y=C7( zYSc7}_C-|3?Dr`A7in@Uuq}*xnCOow=wUN^;t&K6dm6eS(MIq=*=JWl?-|pt!T#)W z$!{sF7fmjT8ntILX$|YR)|mK57m3f3ein>ddN zD)&!2$=Xz=hP-=9;@!`aOm)REVAh{w-tDP0JzN`_8^LujGAJVcmh0sjXChRS{=Q;M zJBlw{2!s`CygrQy@~BLrXpM?hXi1Gu!~gmVLM)9crKX`h5IS1!Z(Cht*x4csv#M@y zz@*e{cVA5InQS5O*;lyH21zO0a|_4?i3!%IIBZ>GsfUq1gb6A#Xbko{Nr`6ojc__; zRs^1#%}CSCWoC5br>DuS?-}@s<@r^ybr1H zS#u;!Ce>;J4uB?0A7uS3GRgwuB~NgdaSykCQd;O3s9}3^bsA86%AYHNxVyuV)MvDP z%ahn~(wo@=cGL{#Xo6n=$Jt+Z=8*Jz<_CtWN2VGhrh(w&dtBi23659=7UQWw4@!fI zwWc--C5Qd1Z8z}d^Dm|89d&iH<~ntWU2r6VCy$U^xRe)F+Fz=D?Ae?7UY`sV{92qy z;;Y}wq2FRQ-|K8GBYw!+r=_em6h2H0Ia^HYaGz#BI7pMT@VI&=QhvSE!C$vp;s6v1 zMLH9x{sc=)*$kJRujEsn`(I`@wV`p^8o8Vq#f#KO^5csEfgEd#=cF&iGfAi;rNG*6 zw)S;Jw+kaRReOpUq3Ed>3-Rsy4;dRmw}>0G8f}TNi8~mQR2609a8KqL>-}$d1x6O1 zqImbo<Gu8E=2vRaovNFNS*#{Mkh=rq)R`O0Ba$mz^Ay?fI_ zy$MKMrPRNXY{DqEq-S(XI^e@Se`MSfUK3+uy5yEi_wjQ!HiMKXXti}cw|aw#(?chS zD)}&C9Ri>EUTrgf0Bw%BB_8mh>6eg2GWW(6Uu6#!L?sD*d2R_PxW>j9eVzt%#a-%v zvk@P$(mrgztl82YgH4uiC!>04qPcx-14rl;;3s1w+O2638sxtkl_^aU7R9F5sJvv4 z$H3}Jsj$rTpx*VvmGUM$RIL1ZIOdlH{%7w-`s}iS`*jK7qnMW}FsolhNTAHIBNVs~85qAxjDu#uxR z5xlr@3h9b;Tc%1p`B5EVA3!kO79J#;=%6{cJQwA;cQ3SYgWLMr2j(dGAxin4g1!F> zS+L*PdQBGmj|l?7Z>KM=5s#`V8DXa4a9p3;@~~&VphE_86rTUi^JZ<~bN!S;#ngkz z*f6~EH3TW(_Lj_;iQQ~2pIUcoZw)d)wRgBQYWmF61GBAKu>ZU7rSO?Zg3<9*DW?mt zML6(!}P^U)$7&FFbdvCV!(Z^iPu6 zDEeY89;ccqe3181Ko=Lh@VLLK=mC8HE}!skGw6SV#y^>dSNKu{`yNiBR(wVTV>e$o zoWCC46xFoftW}jFDcs;hNteM3x+MzI*-pn))&-0RE}?%;$+B@OcajPnPWiLFxS5%@ z5=7#m59j)|Wul)){zl9Hn{xYq2FIifJ-A+Wy;Xws-Wk|8IVzL;e~SFSBKQm>@PoB5 z9#Eq%W`vFGvCp7TT0w&Mr?PCZ$i3HX(Yny3(wO%ZPK|nx4KZ4xkE`8P@S+ zW;S4532I3Ay4u`Od%CI-&*By8Lfs4#j<;s|xsQm_=S6D$9ZGGos@TV;wY%{5%xP*4 zUE=2&-qY3?$RB{IzOE+7WTXY}X^Cz3EH0H?#D85~NMV#K`Z?iiMa%7{#C$z0sa6m` z)mO$Y7b3Du=wm|e{@9P8%)yxC#5ZNCmSTpd>OK=7=DYpAR%>k;In`!b^5u}3#g!iB zisF3JF`bUsH1V5J-^%Z&VO&=hge!FI7F>RIaJ6VNJLt2L?eDg~To-ZYCR*+K2??7wJ4w*w1QGmK; zw!JBhl4I`afubA1zBIO^{EROB?a^JGXe1e(37+N(Z>CFPGe^tDDCd;uCUh6u*YijA z=LuLpr$!lJml_krHruE~{jXIBt+UxUTfi%HJ?vmsOO%LV{9J2gidTX?-`9jH2Y+Ycp1?oDK#Y1&Jz9W*I4Zvuqd*+| zkogx1p5BT@{h!2AYRUR4n>%73fw&LJu0b|8+Of_$??&`{n27J~kdUw>ZkzspHFH!8 zataD$@^1!^_-laS|2W?NgC;%h!1E@sj;q<;+>fNR`6jTOxNM%%fE1f{Y#?#vXIqTD zAMml6qDBX2z}ac;aarkl{TA<0AQg`(ENrnai!=D=T|%WQ0jp+MI#}>@$m!bX5wNG;`m8!4YoF;rc-Ca)SW69Rj z0Gkav3ymCU^C(G>o>J3cceT+GR|Snr9~+%6d>FCv+o~jJ9@WshYMO%epL^+ROSrn_ zTNmh#P@+-JCFgf({_~#Y-mC2_+B}?j<2VBn!LEDVoq2Shz0VWJ@CKq0;v6sq;5(nr zB?#~y)38!#LyE0N7JiLfBsi~Emli81ErQ5<3oPXAsuwDlYKr&!OBQl6fgYwl)++IO zJe;|fnzk3mhegFn=_XVnvD~ggvHDV^BQC?-bv_QBr{2B}CHkW=G}A6!#D~yyUk+x= z=9D=4oiVWQRgkt21?PY4}RlYCc96OXX#16ht7F4eW*0Hbrtgg(L6gNka4 
zgu-N87LRhh*1`vIE*G+jel0s|KPzTW**lkoxeV;lR23A8YtAQ<2@jtZimSVtp44cVjDk8-J)s|2z?(w>Wc5CEZ2N5V zyc<_$VZB4HI|HI$pwsxgX5LCpu$^RgZJ+r-XpF^(|o+(=y@i~MWMTP8BkJI$+o96{j>$*neO27urfl?dlubs*U0~R^V+4g1_QjW zz)Gr2+?VIMEHVjNV{Fqy>p3VeQsUU9L;t~fAPxM4Pd&ffby zTt>#D+;b>%EG#Xx$9Ow>EO+U+T3)yqqB*(0CvWB&&3NRr)_7WJSWAN)k&vDL&PVf$ zUA{v2@rHC&bXPr|*&)B0(@c+yY62z|0&WfM^qjVpHhy`$;bUnKZ{XHjW~_!gkTe&4 zOv!iH%Z`kM^zCbbI+KN^CFv!ku#+RKKyKthkG6-o-YGoowaiG2cN~qetOfr<@|?0_ z0Z)(r!cJlHv4J<^T0(hdXOMBLf-iUJzOvzY^jMr6J5>N<1S>7r-AUx*cY#{C^m7mO ztHPGpVa!Za&(elrcRr@w{?7GY zlEe`nOX7JNvjrGE=|%xJR-gHx z13kL3GDmRgDN`7(h4hG125*y$EO@AS!vnpJA<)@P36Nf7dMK5mmeM@dlE<^Zofv~Q zYH%e*=63p?_`-E{uWIPCdn+1rNBczPyaIaa4Ba<$P0t=D_r~r-M_BPm$3``?gO@#d z0|RlyB8fw#(}{8E+dVt@HMy}M;8oYUwPy(rCtFhTMDhd>DBtVt3N$@g6p0Kv>YMh4 z&reSnCXf&hDq5Z2oub71X#5jd*{*e4{sds7YQu7XL$ zLOtJp8S4&8@`m@edZRn><%WJaHQ6>E+AxQun`K1Ehf`8^5C1|(%=^Iv$vdg!$=uzK zWQQjc4MzbdDdphB>RfhQ#-Y?F3?%QIxXzZrM}#OJdwq4pi(uRjO1sZ z{zQ3@gG+oW@8bb>oBkyG*5K!6SJ}|66j1*#4Lx^iw9~rcz$Og}J=^$m4&tU30eHQo zKEO9mc#Sj9bWkeAX|i)bd;Ylq_Fz&nV^Fhlr|^@}P_he9Y~NbuRKbzr!Kb1o7`x-_sVX3z zt#3{Z)ICFWMq?~6BJ5CWcI}oQ0mLT@j>7FN*L~NMj~B`T_;clgWTTPsoPhGTkgUoG z(!FWKZAd{2c0&+mpF%z#G(@Y^Wf@W!)h2{5r)5h@}Xz?4UQV3e%s\ndeleted->%s', repos_infos[duplicate_id], repo_infos) + else: + visited.add(id) + repos_infos[id] = repo_infos + repo.name = repo_name + repo.main_remote = remote + # if not, we need to give information on how to group repos: odoo+enterprise+upgarde+design-theme/se/runbot + # this mean that we will need to group build too. Could be nice but maybe a little difficult. + if repo_name in project_matching: + project = project_matching[repo_name] + else: + project = env['runbot.project'].create({ + 'name': repo_name, + }) + # also create a master budle, just in case + env['runbot.bundle'].create({ + 'name': 'master', + 'is_base': True, + 'project_id': project.id + }) + repo.project_id = project.id + + cr.execute(""" SELECT dependency_id FROM runbot_repo_dep_rel WHERE dependant_id = %s""", (id,)) + dependency_ids = [r[0] for r in cr.fetchall()] + + trigger = env['runbot.trigger'].create({ + 'name': repo_name, + 'project_id': project.id, + 'repo_ids': [(4, id)], + 'dependency_ids': [(4, dependency_id) for dependency_id in dependency_ids], + 'config_id': repo_config_id if repo_config_id else env.ref('runbot.runbot_build_config_default').id, + }) + triggers[id] = trigger + triggers_by_project[project.id].append(trigger) + + ####################### + # Branches + ####################### + cr.execute('UPDATE runbot_branch SET name=branch_name') + + # no build, config, ... + dummy_bundle = env.ref('runbot.bundle_dummy') + ######################## + # Bundles + ######################## + _logger.info('Creating bundles') + + branches = env['runbot.branch'].search([], order='id') + + branches._compute_reference_name() + + bundles = {('master', RD_project.id): env.ref('runbot.bundle_master')} + branch_to_bundle = {} + branch_to_version = {} + progress = _bar(len(branches)) + env.cr.execute("""SELECT id FROM runbot_branch WHERE sticky='t'""") + sticky_ids = [rec[0] for rec in env.cr.fetchall()] + + for i, branch in enumerate(branches): + progress.update(i) + repo = branch.remote_id.repo_id + if branch.target_branch_name and branch.pull_head_name: + # 1. update source_repo: do not call github and use a naive approach: + # pull_head_name contains odoo-dev and a repo in group starts with odoo-dev -> this is a known repo. 
+ owner = branch.pull_head_name.split(':')[0] + pull_head_remote_id = owner_to_remote.get((owner, repo.id)) + if pull_head_remote_id: + branch.pull_head_remote_id = pull_head_remote_id + project_id = repo.project_id.id + name = branch.reference_name + + key = (name, project_id) + if key not in bundles: + bundle = env['runbot.bundle'].create({ + 'name': name, + 'project_id': project_id, + 'sticky': branch.id in sticky_ids, + 'is_base': branch.id in sticky_ids, + }) + bundles[key] = bundle + bundle = bundles[key] + + if branch.is_pr: + if bundle.is_base: + _logger.warning('Trying to add pr %s (%s) to base bundle (%s)', branch.name, branch.id, bundle.name) + bundle = dummy_bundle + elif ':' in name: + # handle external PR's + base_name = name.split(':')[1].split('-')[0] + defined_base_key = (base_name, project_id) + if defined_base_key in bundles: + bundle.defined_base_id = bundles[defined_base_key] + + branch.bundle_id = bundle + branch_to_bundle[branch.id] = bundle + branch_to_version[branch.id] = bundle.version_id.id + + branches.flush() + env['runbot.bundle'].flush() + progress.finish() + + batch_size = 100000 + + sha_commits = {} + sha_repo_commits = {} + branch_heads = {} + commit_link_ids = defaultdict(dict) + cr.execute("SELECT count(*) FROM runbot_build") + nb_build = cr.fetchone()[0] + + ######################## + # BUILDS + ######################## + _logger.info('Creating main commits') + counter = 0 + progress = _bar(nb_build) + cross_project_duplicate_ids = [] + for offset in range(0, nb_build, batch_size): + cr.execute(""" + SELECT id, + repo_id, name, author, author_email, committer, committer_email, subject, date, duplicate_id, branch_id + FROM runbot_build ORDER BY id asc LIMIT %s OFFSET %s""", (batch_size, offset)) + + for id, repo_id, name, author, author_email, committer, committer_email, subject, date, duplicate_id, branch_id in cr.fetchall(): + progress.update(counter) + remote_id = env['runbot.remote'].browse(repo_id) + #assert remote_id.exists() + if not repo_id: + _logger.warning('No repo_id for build %s, skipping', id) + continue + key = (name, remote_id.repo_id.id) + if key in sha_repo_commits: + commit = sha_repo_commits[key] + else: + if duplicate_id and remote_id.repo_id.project_id.id != RD_project.id: + cross_project_duplicate_ids.append(id) + elif duplicate_id: + _logger.warning('Problem: duplicate: %s,%s', id, duplicate_id) + + commit = env['runbot.commit'].create({ + 'name': name, + 'repo_id': remote_id.repo_id.id, # now that the repo_id on the build correspond to a remote_id + 'author': author, + 'author_email': author_email, + 'committer': committer, + 'committer_email': committer_email, + 'subject': subject, + 'date': date + }) + sha_repo_commits[key] = commit + sha_commits[name] = commit + branch_heads[branch_id] = commit.id + counter += 1 + + commit_link_ids[id][commit.repo_id.id] = commit.id + + + progress.finish() + + if cross_project_duplicate_ids: + _logger.info('Cleaning cross project duplicates') + cr.execute("UPDATE runbot_build SET local_state='done', duplicate_id=NULL WHERE id IN %s", (tuple(cross_project_duplicate_ids), )) + + _logger.info('Creating params') + counter = 0 + + cr.execute("SELECT count(*) FROM runbot_build WHERE duplicate_id IS NULL") + nb_real_build = cr.fetchone()[0] + progress = _bar(nb_real_build) + + # monkey patch to avoid search + original = env['runbot.build.params']._find_existing + existing = {} + + def _find_existing(fingerprint): + return existing.get(fingerprint, env['runbot.build.params']) + + param = 
env['runbot.build.params'] + param._find_existing = _find_existing + + builds_deps = defaultdict(list) + def get_deps(bid): + if bid < get_deps.start or bid > get_deps.stop: + builds_deps.clear() + get_deps.start = bid + get_deps.stop = bid+batch_size + cr.execute('SELECT build_id, dependency_hash, dependecy_repo_id, closest_branch_id, match_type FROM runbot_build_dependency WHERE build_id>=%s and build_id<=%s', (get_deps.start, get_deps.stop)) + for build_id, dependency_hash, dependecy_repo_id, closest_branch_id, match_type in cr.fetchall(): + builds_deps[build_id].append((dependency_hash, dependecy_repo_id, closest_branch_id, match_type)) + return builds_deps[bid] + get_deps.start = 0 + get_deps.stop = 0 + + def update_build_params(params_id, id): + cr.execute('UPDATE runbot_build SET params_id=%s WHERE id=%s OR duplicate_id = %s', (params_id, id, id)) + + build_ids_to_recompute = [] + for offset in range(0, nb_real_build, batch_size): + cr.execute(""" + SELECT + id, branch_id, repo_id, extra_params, config_id, config_data + FROM runbot_build WHERE duplicate_id IS NULL ORDER BY id asc LIMIT %s OFFSET %s""", (batch_size, offset)) + + for id, branch_id, repo_id, extra_params, config_id, config_data in cr.fetchall(): + progress.update(counter) + counter += 1 + build_ids_to_recompute.append(id) + + remote_id = env['runbot.remote'].browse(repo_id) + commit_link_ids_create_values = [ + {'commit_id': commit_link_ids[id][remote_id.repo_id.id], 'match_type':'base_head'}] + + for dependency_hash, dependecy_repo_id, closest_branch_id, match_type in get_deps(id): + dependency_remote_id = env['runbot.remote'].browse(dependecy_repo_id) + key = (dependency_hash, dependency_remote_id.id) + commit = sha_repo_commits.get(key) or sha_commits.get(dependency_hash) + if not commit: + # -> most of the time, commit in exists but with wrong repo. Info can be found on other commit. 
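+ # fallback: create a minimal placeholder commit on the dependency's repo so the build params can still reference it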
+ _logger.warning('Missing commit %s created', dependency_hash) + commit = env['runbot.commit'].create({ + 'name': dependency_hash, + 'repo_id': dependency_remote_id.repo_id.id, + }) + sha_repo_commits[key] = commit + sha_commits[dependency_hash] = commit + commit_link_ids[id][dependency_remote_id.id] = commit.id + match_type = 'base_head' if match_type in ('pr_target', 'prefix', 'default') else 'head' + commit_link_ids_create_values.append({'commit_id': commit.id, 'match_type':match_type, 'branch_id': closest_branch_id}) + + params = param.create({ + 'version_id': branch_to_version[branch_id], + 'extra_params': extra_params, + 'config_id': config_id, + 'project_id': env['runbot.repo'].browse(remote_id.repo_id.id).project_id, + 'trigger_id': triggers[remote_id.repo_id.id].id, + 'config_data': config_data, + 'commit_link_ids': [(0, 0, values) for values in commit_link_ids_create_values] + }) + existing[params.fingerprint] = params + update_build_params(params.id, id) + env.cache.invalidate() + progress.finish() + + env['runbot.build.params']._find_existing = original + + ###################### + # update dest + ###################### + _logger.info('Updating build dests') + counter = 0 + progress = _bar(nb_real_build) + for offset in range(0, len(build_ids_to_recompute), batch_size): + builds = env['runbot.build'].browse(build_ids_to_recompute[offset:offset+batch_size]) + builds._compute_dest() + progress.update(batch_size) + progress.finish() + + for branch, head in branch_heads.items(): + cr.execute('UPDATE runbot_branch SET head=%s WHERE id=%s', (head, branch)) + del branch_heads + # adapt build commits + + + _logger.info('Creating batchs') + ################### + # Bundle batch + #################### + cr.execute("SELECT count(*) FROM runbot_build WHERE parent_id IS NOT NULL") + nb_root_build = cr.fetchone()[0] + counter = 0 + progress = _bar(nb_root_build) + previous_batch = {} + for offset in range(0, nb_root_build, batch_size): + cr.execute(""" + SELECT + id, duplicate_id, repo_id, branch_id, create_date, build_type, config_id, params_id + FROM runbot_build WHERE parent_id IS NULL order by id asc + LIMIT %s OFFSET %s""", (batch_size, offset)) + for id, duplicate_id, repo_id, branch_id, create_date, build_type, config_id, params_id in cr.fetchall(): + progress.update(counter) + counter += 1 + if repo_id is None: + _logger.warning('Skipping %s: no repo', id) + continue + bundle = branch_to_bundle[branch_id] + # try to merge build in same batch + # not temporal notion in this case, only hash consistency + batch = False + build_id = duplicate_id or id + build_commits = commit_link_ids[build_id] + batch_repos_ids = [] + + # check if this build can be added to last_batch + if bundle.last_batch: + if create_date - bundle.last_batch.last_update < datetime.timedelta(minutes=5): + if duplicate_id and build_id in bundle.last_batch.slot_ids.mapped('build_id').ids: + continue + + # to fix: nightly will be in the same batch of the previous normal one. If config_id is diffrent, create batch? 
+ # possible fix: max create_date diff + batch = bundle.last_batch + batch_commits = batch.commit_ids + batch_repos_ids = batch_commits.mapped('repo_id').ids + for commit in batch_commits: + if commit.repo_id.id in build_commits: + if commit.id != build_commits[commit.repo_id.id]: + batch = False + batch_repos_ids = [] + break + + missing_commits = [commit_id for repo_id, commit_id in build_commits.items() if repo_id not in batch_repos_ids] + + if not batch: + batch = env['runbot.batch'].create({ + 'create_date': create_date, + 'last_update': create_date, + 'state': 'ready', + 'bundle_id': bundle.id + }) + #if bundle.last_batch: + # previous = previous_batch.get(bundle.last_batch.id) + # if previous: + # previous_build_by_trigger = {slot.trigger_id.id: slot.build_id.id for slot in previous.slot_ids} + # else: + # previous_build_by_trigger = {} + # batch_slot_triggers = bundle.last_batch.slot_ids.mapped('trigger_id').ids + # missing_trigger_ids = [trigger for trigger in triggers_by_project[bundle.project_id.id] if trigger.id not in batch_slot_triggers] + # for trigger in missing_trigger_ids: + # env['runbot.batch.slot'].create({ + # 'trigger_id': trigger.id, + # 'batch_id': bundle.last_batch.id, + # 'build_id': previous_build_by_trigger.get(trigger.id), # may be None, if we want to create empty slots. Else, iter on slot instead + # 'link_type': 'matched', + # 'active': True, + # }) + + previous_batch[batch.id] = bundle.last_batch + bundle.last_batch = batch + else: + batch.last_update = create_date + + real_repo_id = env['runbot.remote'].browse(repo_id).repo_id.id + env['runbot.batch.slot'].create({ + 'params_id': params_id, + 'trigger_id': triggers[real_repo_id].id, + 'batch_id': batch.id, + 'build_id': build_id, + 'link_type': 'rebuild' if build_type == 'rebuild' else 'matched' if duplicate_id else 'created', + 'active': True, + }) + commit_links_values = [] + for missing_commit in missing_commits: + commit_links_values.append({ + 'commit_id': missing_commit, + 'match_type': 'new', + }) + batch.commit_link_ids = [(0, 0, values) for values in commit_links_values] + if batch.state == 'ready' and all(slot.build_id.global_state in (False, 'running', 'done') for slot in batch.slot_ids): + batch.state = 'done' + + env.cache.invalidate() + progress.finish() + + #Build of type rebuild may point to same params as rebbuild? + + ################### + # Cleaning (performances) + ################### + # 1. avoid UPDATE "runbot_build" SET "commit_path_mode"=NULL WHERE "commit_path_mode"='soft' + + _logger.info('Pre-cleaning') + cr.execute('alter table runbot_build alter column commit_path_mode drop not null') + cr.execute('ANALYZE') + cr.execute("delete from runbot_build where local_state='duplicate'") # what about duplicate childrens? + _logger.info('End') diff --git a/runbot/migrations/13.0.5.0/pre-migration.py b/runbot/migrations/13.0.5.0/pre-migration.py new file mode 100644 index 00000000..46b8a9d1 --- /dev/null +++ b/runbot/migrations/13.0.5.0/pre-migration.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +import logging +_logger = logging.getLogger(__name__) + + +def migrate(cr, version): + # dependency is not correct since it will be all commits. 
This also free the name for a build dependant on another build params + + # those indexes are improving the branches deletion + cr.execute('CREATE INDEX ON runbot_branch (defined_sticky);') + cr.execute('CREATE INDEX ON runbot_build_dependency (closest_branch_id);') + + # Fix duplicate problems + cr.execute("UPDATE runbot_build SET duplicate_id = null WHERE duplicate_id > id") + cr.execute("UPDATE runbot_build SET local_state='done' WHERE duplicate_id IS NULL AND local_state = 'duplicate';") + # Remove builds without a repo + cr.execute("DELETE FROM runbot_build WHERE repo_id IS NULL") + + cr.execute("DELETE FROM ir_ui_view WHERE id IN (SELECT res_id FROM ir_model_data WHERE name = 'inherits_branch_in_menu' AND module = 'runbot')") + + # Fix branches + cr.execute("""DELETE FROM runbot_branch WHERE name SIMILAR TO 'refs/heads/\d+' RETURNING id,name;""") # Remove old bad branches named like PR + for branch_id, name in cr.fetchall(): + _logger.warning('Deleting branch id %s with name "%s"', branch_id, name) + + cr.execute("""SELECT branch_name,repo_id, count(*) AS nb FROM runbot_branch GROUP BY branch_name,repo_id HAVING count(*) > 1;""") # Branches with duplicate branch_name in same repo + for branch_name, repo_id, nb in cr.fetchall(): + cr.execute("""DELETE FROM runbot_branch WHERE (sticky='f' OR sticky IS NULL) AND branch_name=%s and repo_id=%s and name ~ 'refs/heads/.+/.+' RETURNING id,branch_name;""", (branch_name, repo_id)) + for branch_id, branch_name in cr.fetchall(): + _logger.warning('Deleting branch id %s with branch_name "%s"', branch_id, branch_name) + + # Raise in case of buggy PR's + cr.execute("SELECT id,name FROM runbot_branch WHERE name LIKE 'refs/pull/%' AND pull_head_name is null") + bad_prs = cr.fetchall() + if bad_prs: + for pr in bad_prs: + _logger.warning('PR with NULL pull_head_name found: %s (%s)', pr[1], pr[0]) + raise RuntimeError("Migration error", "Found %s PR's without pull_head_name" % len(bad_prs)) + + # avoid recompute of branch._comput_bundle_id otherwise, it cannot find xml data + cr.execute('ALTER TABLE runbot_branch ADD COLUMN bundle_id INTEGER;') + + # avoid recompute of pull_head_name wich is emptied during the recompute + cr.execute('ALTER TABLE runbot_branch ADD COLUMN pull_head_remote_id INTEGER;') + + cr.execute('ALTER TABLE runbot_branch ADD COLUMN is_pr BOOLEAN;') + cr.execute("""UPDATE runbot_branch SET is_pr = CASE WHEN name like 'refs/pull/%' THEN true ELSE false END;""") + + # delete runbot.repo inehrited views + cr.execute("DELETE FROM ir_ui_view WHERE inherit_id IN (SELECT id from ir_ui_view WHERE name = 'runbot.repo');") + return diff --git a/runbot/models/__init__.py b/runbot/models/__init__.py index f24dcb9f..fce22625 100644 --- a/runbot/models/__init__.py +++ b/runbot/models/__init__.py @@ -1,14 +1,24 @@ # -*- coding: utf-8 -*- -from . import repo +from . import batch from . import branch from . import build -from . import event -from . import build_dependency from . import build_config -from . import ir_cron -from . import host from . import build_error +from . import bundle +from . import commit +from . import database +from . import event +from . import host +from . import ir_cron +from . import ir_ui_view +from . import project +from . import repo +from . import res_config_settings +from . import runbot +from . import upgrade +from . import user +from . import version + from . import build_stat from . import build_stat_regex -from . 
import res_config_settings diff --git a/runbot/models/batch.py b/runbot/models/batch.py new file mode 100644 index 00000000..eabe8c7c --- /dev/null +++ b/runbot/models/batch.py @@ -0,0 +1,408 @@ +import time +import logging +import datetime +import subprocess + +from odoo import models, fields, api +from ..common import dt2time, s2human_long, pseudo_markdown + +_logger = logging.getLogger(__name__) + + +class Batch(models.Model): + _name = 'runbot.batch' + _description = "Bundle batch" + + last_update = fields.Datetime('Last ref update') + bundle_id = fields.Many2one('runbot.bundle', required=True, index=True, ondelete='cascade') + commit_link_ids = fields.Many2many('runbot.commit.link') + commit_ids = fields.Many2many('runbot.commit', compute='_compute_commit_ids') + slot_ids = fields.One2many('runbot.batch.slot', 'batch_id') + state = fields.Selection([('preparing', 'Preparing'), ('ready', 'Ready'), ('done', 'Done'), ('skipped', 'Skipped')]) + hidden = fields.Boolean('Hidden', default=False) + age = fields.Integer(compute='_compute_age', string='Build age') + category_id = fields.Many2one('runbot.category', default=lambda self: self.env.ref('runbot.default_category', raise_if_not_found=False)) + log_ids = fields.One2many('runbot.batch.log', 'batch_id') + has_warning = fields.Boolean("Has warning") + + @api.depends('commit_link_ids') + def _compute_commit_ids(self): + for batch in self: + batch.commit_ids = batch.commit_link_ids.commit_id + + @api.depends('create_date') + def _compute_age(self): + """Return the time between job start and now""" + for batch in self: + if batch.create_date: + batch.age = int(time.time() - dt2time(batch.create_date)) + else: + batch.buildage_age = 0 + + def get_formated_age(self): + return s2human_long(self.age) + + def _url(self): + self.ensure_one() + runbot_domain = self.env['runbot.runbot']._domain() + return "http://%s/runbot/batch/%s" % (runbot_domain, self.id) + + def _new_commit(self, branch, match_type='new'): + # if not the same hash for repo: + commit = branch.head + self.last_update = fields.Datetime.now() + for commit_link in self.commit_link_ids: + # case 1: a commit already exists for the repo (pr+branch, or fast push) + if commit_link.commit_id.repo_id == commit.repo_id: + if commit_link.commit_id.id != commit.id: + self._log('New head on branch %s during throttle phase: Replacing commit %s with %s', branch.name, commit_link.commit_id.name, commit.name) + commit_link.write({'commit_id': commit.id, 'branch_id': branch.id}) + elif not commit_link.branch_id.is_pr and branch.is_pr: + commit_link.branch_id = branch # Try to have a pr instead of branch on commit if possible ? + break + else: + self.write({'commit_link_ids': [(0, 0, { + 'commit_id': commit.id, + 'match_type': match_type, + 'branch_id': branch.id + })]}) + + def _skip(self): + for batch in self: + if batch.bundle_id.is_base or batch.state == 'done': + continue + batch.state = 'skipped' # done? 
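+ # a slot's build is only skipped or marked killable below if no other non-skipped slot still uses it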
+ batch._log('Skipping batch') + for slot in batch.slot_ids: + slot.skipped = True + build = slot.build_id + testing_slots = build.slot_ids.filtered(lambda s: not s.skipped) + if not testing_slots: + if build.global_state == 'pending': + build._skip('Newer build found') + elif build.global_state in ('waiting', 'testing'): + build.killable = True + elif slot.link_type == 'created': + batches = testing_slots.mapped('batch_id') + _logger.info('Cannot skip build %s build is still in use in batches %s', build.id, batches.ids) + bundles = batches.mapped('bundle_id') - batch.bundle_id + if bundles: + batch._log('Cannot kill or skip build %s, build is used in another bundle: %s', build.id, bundles.mapped('name')) + + def _process(self): + for batch in self: + if batch.state == 'preparing' and batch.last_update < fields.Datetime.now() - datetime.timedelta(seconds=60): + batch._prepare() + elif batch.state == 'ready' and all(slot.build_id.global_state in (False, 'running', 'done') for slot in batch.slot_ids): + batch._log('Batch done') + batch.state = 'done' + + def _create_build(self, params): + """ + Create a build with given params_id if it does not already exists. + In the case that a very same build already exists that build is returned + """ + build = self.env['runbot.build'].search([('params_id', '=', params.id), ('parent_id', '=', False)], limit=1, order='id desc') + link_type = 'matched' + if build: + build.killable = False + else: + description = params.trigger_id.description if params.trigger_id.description else False + link_type = 'created' + build = self.env['runbot.build'].create({ + 'params_id': params.id, + 'description': description, + 'build_type': 'normal' if self.category_id == self.env.ref('runbot.default_category') else 'scheduled', + 'no_auto_run': self.bundle_id.no_auto_run, + }) + build._github_status(post_commit=False) + return link_type, build + + def _prepare(self, auto_rebase=False): + for level, message in self.bundle_id.consistency_warning(): + if level == "warning": + self.warning("Bundle warning: %s" % message) + + self.state = 'ready' + _logger.info('Preparing batch %s', self.id) + + bundle = self.bundle_id + project = bundle.project_id + if not bundle.version_id: + _logger.error('No version found on bundle %s in project %s', bundle.name, project.name) + triggers = self.env['runbot.trigger'].search([ # could be optimised for multiple batches. Ormcached method? + ('project_id', '=', project.id), + ('category_id', '=', self.category_id.id) + ]).filtered( + lambda t: not t.version_domain or \ + self.bundle_id.version_id.filtered_domain(t.get_version_domain()) + ) + + pushed_repo = self.commit_link_ids.mapped('commit_id.repo_id') + dependency_repos = triggers.mapped('dependency_ids') + all_repos = triggers.mapped('repo_ids') | dependency_repos + missing_repos = all_repos - pushed_repo + + ###################################### + # Find missing commits + ###################################### + def fill_missing(branch_commits, match_type): + if branch_commits: + for branch, commit in branch_commits.items(): # branch first in case pr is closed. 
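+ # only repos still in missing_repos are filled; dead branches are skipped, and 'base*' match types also seed the base/merge-base commit ids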
+ nonlocal missing_repos + if commit.repo_id in missing_repos: + if not branch.alive: + self._log("Skipping dead branch %s" % branch.name) + continue + values = { + 'commit_id': commit.id, + 'match_type': match_type, + 'branch_id': branch.id, + } + if match_type.startswith('base'): + values['base_commit_id'] = commit.id + values['merge_base_commit_id'] = commit.id + self.write({'commit_link_ids': [(0, 0, values)]}) + missing_repos -= commit.repo_id + + # CHECK branch heads consistency + branch_per_repo = {} + for branch in bundle.branch_ids.sorted(lambda b: (b.head.id, b.is_pr), reverse=True): + if branch.alive: + commit = branch.head + repo = commit.repo_id + if repo not in branch_per_repo: + branch_per_repo[repo] = branch + elif branch_per_repo[repo].head != branch.head and branch.alive: + obranch = branch_per_repo[repo] + self._log("Branch %s and branch %s in repo %s don't have the same head: %s ≠ %s", branch.dname, obranch.dname, repo.name, branch.head.name, obranch.head.name) + + # 1.1 FIND missing commit in bundle heads + if missing_repos: + fill_missing({branch: branch.head for branch in bundle.branch_ids.sorted(lambda b: (b.head.id, b.is_pr), reverse=True)}, 'head') + + # 1.2 FIND merge_base info for those commits + # use last not preparing batch to define previous repos_heads instead of branches heads: + # Will allow to have a diff info on base bundle, compare with previous bundle + last_base_batch = self.env['runbot.batch'].search([('bundle_id', '=', bundle.base_id.id), ('state', '!=', 'preparing'), ('category_id', '=', self.category_id.id), ('id', '!=', self.id)], order='id desc', limit=1) + base_head_per_repo = {commit.repo_id.id: commit for commit in last_base_batch.commit_ids} + self._update_commits_infos(base_head_per_repo) # set base_commit, diff infos, ... + + # 2. FIND missing commit in a compatible base bundle + if missing_repos and not bundle.is_base: + merge_base_commits = self.commit_link_ids.mapped('merge_base_commit_id') + if auto_rebase: + batch = last_base_batch + self._log('Using last done batch %s to define missing commits (automatic rebase)', batch.id) + else: + batch = False + link_commit = self.env['runbot.commit.link'].search([ + ('commit_id', 'in', merge_base_commits.ids), + ('match_type', 'in', ('new', 'head')) + ]) + batches = self.env['runbot.batch'].search([ + ('bundle_id', '=', bundle.base_id.id), + ('commit_link_ids', 'in', link_commit.ids), + ('state', '!=', 'preparing'), + ('category_id', '=', self.category_id.id) + ]).sorted(lambda b: (len(b.commit_ids & merge_base_commits), b.id), reverse=True) + if batches: + batch = batches[0] + self._log('Using batch %s to define missing commits', batch.id) + batch_exiting_commit = batch.commit_ids.filtered(lambda c: c.repo_id in merge_base_commits.repo_id) + not_matching = (batch_exiting_commit - merge_base_commits) + if not_matching: + message = 'Only %s out of %s merge base matched. You may want to rebase your branches to ensure compatibility' % (len(merge_base_commits)-len(not_matching), len(merge_base_commits)) + suggestions = [('Tip: rebase %s to %s' % (commit.repo_id.name, commit.name)) for commit in not_matching] + self.warning('%s\n%s' % (message, '\n'.join(suggestions))) + if batch: + fill_missing({link.branch_id: link.commit_id for link in batch.commit_link_ids}, 'base_match') + + # 3.1 FIND missing commit in base heads + if missing_repos: + if not bundle.is_base: + self._log('Not all commit found in bundle branches and base batch. 
Fallback on base branches heads.') + fill_missing({branch: branch.head for branch in self.bundle_id.base_id.branch_ids}, 'base_head') + + # 3.2 FIND missing commit in master base heads + if missing_repos: # this is to get an upgrade branch. + if not bundle.is_base: + self._log('Not all commit found in current version. Fallback on master branches heads.') + master_bundle = self.env['runbot.version']._get('master').with_context(project_id=self.bundle_id.project_id.id).base_bundle_id + fill_missing({branch: branch.head for branch in master_bundle.branch_ids}, 'base_head') + + # 4. FIND missing commit in foreign project + if missing_repos: + foreign_projects = dependency_repos.mapped('project_id') - project + if foreign_projects: + self._log('Not all commit found. Fallback on foreign base branches heads.') + foreign_bundles = bundle.search([('name', '=', bundle.name), ('project_id', 'in', foreign_projects.ids)]) + fill_missing({branch: branch.head for branch in foreign_bundles.mapped('branch_ids').sorted('is_pr', reverse=True)}, 'head') + if missing_repos: + foreign_bundles = bundle.search([('name', '=', bundle.base_id.name), ('project_id', 'in', foreign_projects.ids)]) + fill_missing({branch: branch.head for branch in foreign_bundles.mapped('branch_ids')}, 'base_head') + + # CHECK missing commit + if missing_repos: + _logger.warning('Missing repo %s for batch %s', missing_repos.mapped('name'), self.id) + + ###################################### + # Generate build params + ###################################### + if auto_rebase: + for commit_link in self.commit_link_ids: + commit_link.commit_id = commit_link.commit_id._rebase_on(commit_link.base_commit_id) + commit_link_by_repos = {commit_link.commit_id.repo_id.id: commit_link for commit_link in self.commit_link_ids} + bundle_repos = bundle.branch_ids.mapped('remote_id.repo_id') + version_id = self.bundle_id.version_id.id + project_id = self.bundle_id.project_id.id + config_by_trigger = {} + for trigger_custom in self.bundle_id.trigger_custom_ids: + config_by_trigger[trigger_custom.trigger_id.id] = trigger_custom.config_id + for trigger in triggers: + trigger_repos = trigger.repo_ids | trigger.dependency_ids + if trigger_repos & missing_repos: + self.warning('Missing commit for repo %s for trigger %s', (trigger_repos & missing_repos).mapped('name'), trigger.name) + continue + # in any case, search for an existing build + config = config_by_trigger.get(trigger.id, trigger.config_id) + + params_value = { + 'version_id': version_id, + 'extra_params': '', + 'config_id': config.id, + 'project_id': project_id, + 'trigger_id': trigger.id, # for future reference and access rights + 'config_data': {}, + 'commit_link_ids': [(6, 0, [commit_link_by_repos[repo.id].id for repo in trigger_repos])], + 'modules': bundle.modules + } + params_value['builds_reference_ids'] = trigger._reference_builds(bundle) + + params = self.env['runbot.build.params'].create(params_value) + + build = self.env['runbot.build'] + link_type = 'created' + if ((trigger.repo_ids & bundle_repos) or bundle.build_all or bundle.sticky) and not trigger.manual: # only auto link build if bundle has a branch for this trigger + link_type, build = self._create_build(params) + self.env['runbot.batch.slot'].create({ + 'batch_id': self.id, + 'trigger_id': trigger.id, + 'build_id': build.id, + 'params_id': params.id, + 'link_type': link_type, + }) + + ###################################### + # SKIP older batches + ###################################### + default_category = 
self.env.ref('runbot.default_category') + if not bundle.sticky and self.category_id == default_category: + skippable = self.env['runbot.batch'].search([ + ('bundle_id', '=', bundle.id), + ('state', '!=', 'done'), + ('id', '<', self.id), + ('category_id', '=', default_category.id) + ]) + skippable._skip() + + def _update_commits_infos(self, base_head_per_repo): + for link_commit in self.commit_link_ids: + commit = link_commit.commit_id + base_head = base_head_per_repo.get(commit.repo_id.id) + if not base_head: + self.warning('No base head found for repo %s', commit.repo_id.name) + continue + link_commit.base_commit_id = base_head + merge_base_sha = False + try: + link_commit.base_ahead = link_commit.base_behind = 0 + link_commit.file_changed = link_commit.diff_add = link_commit.diff_remove = 0 + link_commit.merge_base_commit_id = commit.id + if commit.name == base_head.name: + continue + merge_base_sha = commit.repo_id._git(['merge-base', commit.name, base_head.name]).strip() + merge_base_commit = self.env['runbot.commit']._get(merge_base_sha, commit.repo_id.id) + link_commit.merge_base_commit_id = merge_base_commit.id + + ahead, behind = commit.repo_id._git(['rev-list', '--left-right', '--count', '%s...%s' % (commit.name, base_head.name)]).strip().split('\t') + + link_commit.base_ahead = int(ahead) + link_commit.base_behind = int(behind) + + if merge_base_sha == commit.name: + continue + + # diff. Iter on --numstat, easier to parse than --shortstat summary + diff = commit.repo_id._git(['diff', '--numstat', merge_base_sha, commit.name]).strip() + if diff: + for line in diff.split('\n'): + link_commit.file_changed += 1 + add, remove, _ = line.split(None, 2) + try: + link_commit.diff_add += int(add) + link_commit.diff_remove += int(remove) + except ValueError: # binary files + pass + except subprocess.CalledProcessError: + self.warning('Commit info failed between %s and %s', commit.name, base_head.name) + + def warning(self, message, *args): + self.has_warning = True + _logger.warning('batch %s: ' + message, self.id, *args) + self._log(message, *args, level='WARNING') + + def _log(self, message, *args, level='INFO'): + self.env['runbot.batch.log'].create({ + 'batch_id': self.id, + 'message': message % args if args else message, + 'level': level, + }) + + +class BatchLog(models.Model): + _name = 'runbot.batch.log' + _description = 'Batch log' + + batch_id = fields.Many2one('runbot.batch', index=True) + message = fields.Text('Message') + level = fields.Char() + + + def _markdown(self): + """ Apply pseudo markdown parser for message. + """ + self.ensure_one() + return pseudo_markdown(self.message) + + + +class BatchSlot(models.Model): + _name = 'runbot.batch.slot' + _description = 'Link between a bundle batch and a build' + _order = 'trigger_id,id' + + _fa_link_type = {'created': 'hashtag', 'matched': 'link', 'rebuild': 'refresh'} + + batch_id = fields.Many2one('runbot.batch', index=True) + trigger_id = fields.Many2one('runbot.trigger', index=True) + build_id = fields.Many2one('runbot.build', index=True) + params_id = fields.Many2one('runbot.build.params', index=True, required=True) + link_type = fields.Selection([('created', 'Build created'), ('matched', 'Existing build matched'), ('rebuild', 'Rebuild')], required=True) # rebuild type? + active = fields.Boolean('Attached', default=True) + skipped = fields.Boolean('Skipped', default=False) + # rebuild, what to do: since build can be in multiple batch: + # - replace for all batch? + # - only available on batch and replace for batch only? 
+ # - create a new bundle batch will new linked build? + + def fa_link_type(self): + return self._fa_link_type.get(self.link_type, 'exclamation-triangle') + + def _create_missing_build(self): + """Create a build when the slot does not have one""" + self.ensure_one() + if self.build_id: + return self.build_id + self.link_type, self.build_id = self.batch_id._create_build(self.params_id) + return self.build_id diff --git a/runbot/models/branch.py b/runbot/models/branch.py index c5a624fc..075ff206 100644 --- a/runbot/models/branch.py +++ b/runbot/models/branch.py @@ -1,354 +1,221 @@ # -*- coding: utf-8 -*- import logging import re -import time -from subprocess import CalledProcessError + +from collections import defaultdict from odoo import models, fields, api -from odoo.osv import expression _logger = logging.getLogger(__name__) -class runbot_branch(models.Model): - - _name = "runbot.branch" +class Branch(models.Model): + _name = 'runbot.branch' _description = "Branch" _order = 'name' - _sql_constraints = [('branch_repo_uniq', 'unique (name,repo_id)', 'The branch must be unique per repository !')] + _sql_constraints = [('branch_repo_uniq', 'unique (name,remote_id)', 'The branch must be unique per repository !')] - repo_id = fields.Many2one('runbot.repo', 'Repository', required=True, ondelete='cascade') - duplicate_repo_id = fields.Many2one('runbot.repo', 'Duplicate Repository', related='repo_id.duplicate_id',) - name = fields.Char('Ref Name', required=True) - branch_name = fields.Char(compute='_get_branch_infos', string='Branch', readonly=1, store=True) - branch_url = fields.Char(compute='_get_branch_url', string='Branch url', readonly=1) - pull_head_name = fields.Char(compute='_get_branch_infos', string='PR HEAD name', readonly=1, store=True) - target_branch_name = fields.Char(compute='_get_branch_infos', string='PR target branch', store=True) - pull_branch_name = fields.Char(compute='_compute_pull_branch_name', string='Branch display name') - sticky = fields.Boolean('Sticky') - closest_sticky = fields.Many2one('runbot.branch', compute='_compute_closest_sticky', string='Closest sticky') - defined_sticky = fields.Many2one('runbot.branch', string='Force sticky') - previous_version = fields.Many2one('runbot.branch', compute='_compute_previous_version', string='Previous version branch') - intermediate_stickies = fields.Many2many('runbot.branch', compute='_compute_intermediate_stickies', string='Intermediates stickies') - coverage_result = fields.Float(compute='_compute_coverage_result', type='Float', string='Last coverage', store=False) # non optimal search in loop, could we store this result ? 
or optimise - state = fields.Char('Status') - modules = fields.Char("Modules to Install", help="Comma-separated list of modules to install and test.") - priority = fields.Boolean('Build priority', default=False) - no_build = fields.Boolean("Forbid creation of build on this branch", default=False) - no_auto_build = fields.Boolean("Don't automatically build commit on this branch", default=False) - rebuild_requested = fields.Boolean("Request a rebuild", help="Rebuild the latest commit even when no_auto_build is set.", default=False) + name = fields.Char('Name', required=True) + remote_id = fields.Many2one('runbot.remote', 'Remote', required=True, ondelete='cascade') - branch_config_id = fields.Many2one('runbot.build.config', 'Branch Config') - config_id = fields.Many2one('runbot.build.config', 'Run Config', compute='_compute_config_id', inverse='_inverse_config_id') + head = fields.Many2one('runbot.commit', 'Head Commit', index=True) + head_name = fields.Char('Head name', related='head.name', store=True) - make_stats = fields.Boolean('Extract stats from logs', compute='_compute_make_stats', store=True) + reference_name = fields.Char(compute='_compute_reference_name', string='Bundle name', store=True) + bundle_id = fields.Many2one('runbot.bundle', 'Bundle', compute='_compute_bundle_id', store=True, ondelete='cascade', index=True) - @api.depends('sticky', 'defined_sticky', 'target_branch_name', 'name') - # won't be recompute if a new branch is marked as sticky or sticky is removed, but should be ok if not stored - def _compute_closest_sticky(self): + is_pr = fields.Boolean('IS a pr', required=True) + pull_head_name = fields.Char(compute='_compute_branch_infos', string='PR HEAD name', readonly=1, store=True) + pull_head_remote_id = fields.Many2one('runbot.remote', 'Pull head repository', compute='_compute_branch_infos', store=True, index=True) + target_branch_name = fields.Char(compute='_compute_branch_infos', string='PR target branch', store=True) + + reflog_ids = fields.One2many('runbot.ref.log', 'branch_id') + + branch_url = fields.Char(compute='_compute_branch_url', string='Branch url', readonly=1) + dname = fields.Char('Display name', compute='_compute_dname', search='_search_dname') + + alive = fields.Boolean('Alive', default=True) + + @api.depends('name', 'remote_id.short_name') + def _compute_dname(self): for branch in self: - if branch.sticky: - branch.closest_sticky = branch - elif branch.defined_sticky: - branch.closest_sticky = branch.defined_sticky # be carefull with loop - elif branch.target_branch_name: - corresponding_branch = self.search([('branch_name', '=', branch.target_branch_name), ('repo_id', '=', branch.repo_id.id)]) - branch.closest_sticky = corresponding_branch.closest_sticky + branch.dname = '%s:%s' % (branch.remote_id.short_name, branch.name) + + def _search_dname(self, operator, value): + if ':' not in value: + return [('name', operator, 'value')] + repo_short_name, branch_name = value.split(':') + owner, repo_name = repo_short_name.split('/') + return ['&', ('remote_id', '=', self.env['runbot.remote'].search([('owner', '=', owner), ('repo_name', '=', repo_name)]).id), ('name', operator, branch_name)] + + @api.depends('name', 'is_pr', 'target_branch_name', 'pull_head_name', 'pull_head_remote_id') + def _compute_reference_name(self): + """ + Unique reference for a branch inside a bundle. 
+ - branch_name for branches + - branch name part of pull_head_name for pr if remote is known + - pull_head_name (organisation:branch_name) for external pr + """ + for branch in self: + if branch.is_pr: + _, name = branch.pull_head_name.split(':') + if branch.pull_head_remote_id: + branch.reference_name = name + else: + branch.reference_name = branch.pull_head_name # repo is not known, not in repo list must be an external pr, so use complete label + #if ':patch-' in branch.pull_head_name: + # branch.reference_name = '%s~%s' % (branch.pull_head_name, branch.name) else: - repo_ids = (branch.repo_id | branch.repo_id.duplicate_id).ids - self.env.cr.execute("select id from runbot_branch where sticky = 't' and repo_id = any(%s) and %s like name||'%%'", (repo_ids, branch.name or '')) - branch.closest_sticky = self.browse(self.env.cr.fetchone()) - - @api.depends('closest_sticky') #, 'closest_sticky.previous_version') - def _compute_previous_version(self): - for branch in self.sorted(key='sticky', reverse=True): - # orm does not support non_searchable.non_stored dependency. - # thus, the closest_sticky.previous_version dependency will log an error - # when previous_version is written. - # this dependency is usefull to make the compute recursive, avoiding to have - # both record and record.closest_sticky in self, in that order, making the record.previous_version - # empty in all cases. - # Sorting self on sticky will mitigate the problem. but it is still posible to - # have computation errors if defined_sticky is not sticky. (which is not a normal use case) - if branch.closest_sticky == branch: - repo_ids = (branch.repo_id | branch.repo_id.duplicate_id).ids - domain = [('branch_name', 'like', '%.0'), ('sticky', '=', True), ('branch_name', '!=', 'master'), ('repo_id', 'in', repo_ids)] - if branch.branch_name != 'master' and branch.id: - domain += [('id', '<', branch.id)] - branch.previous_version = self.search(domain, limit=1, order='id desc') - else: - branch.previous_version = branch.closest_sticky.previous_version - - @api.depends('previous_version', 'closest_sticky') - def _compute_intermediate_stickies(self): - for branch in self.sorted(key='sticky', reverse=True): - if branch.closest_sticky == branch: - if not branch.previous_version: - branch.intermediate_stickies = [(5, 0, 0)] - continue - repo_ids = (branch.repo_id | branch.repo_id.duplicate_id).ids - domain = [('id', '>', branch.previous_version.id), ('sticky', '=', True), ('branch_name', '!=', 'master'), ('repo_id', 'in', repo_ids)] - if branch.closest_sticky.branch_name != 'master' and branch.closest_sticky.id: - domain += [('id', '<', branch.closest_sticky.id)] - branch.intermediate_stickies = [(6, 0, self.search(domain, order='id desc').ids)] - else: - branch.intermediate_stickies = [(6, 0, branch.closest_sticky.intermediate_stickies.ids)] - - def _compute_config_id(self): - for branch in self: - if branch.branch_config_id: - branch.config_id = branch.branch_config_id - else: - branch.config_id = branch.repo_id.config_id - - def _inverse_config_id(self): - for branch in self: - branch.branch_config_id = branch.config_id - - def _compute_pull_branch_name(self): - for branch in self: - branch.pull_branch_name = branch.pull_head_name.split(':')[-1] if branch.pull_head_name else branch.branch_name - - @api.depends('sticky') - def _compute_make_stats(self): - for branch in self: - branch.make_stats = branch.sticky + branch.reference_name = branch.name @api.depends('name') - def _get_branch_infos(self, pull_info=None): - """compute 
branch_name, branch_url, pull_head_name and target_branch_name based on name""" + def _compute_branch_infos(self, pull_info=None): + """compute branch_url, pull_head_name and target_branch_name based on name""" + name_to_remote = {} + prs = self.filtered(lambda branch: branch.is_pr) + pull_info_dict = {} + if not pull_info and len(prs) > 30: # this is arbitrary, we should store # page on remote + pr_per_remote = defaultdict(list) + for pr in prs: + pr_per_remote[pr.remote_id].append(pr) + for remote, prs in pr_per_remote.items(): + _logger.info('Getting info in %s for %s pr using page scan', remote.name, len(prs)) + pr_names = set([pr.name for pr in prs]) + count = 0 + for result in remote._github('/repos/:owner/:repo/pulls?state=all&sort=updated&direction=desc', ignore_errors=True, recursive=True): + for info in result: + number = str(info.get('number')) + pr_names.discard(number) + pull_info_dict[(remote, number)] = info + count += 1 + if not pr_names: + break + if count > 100: + _logger.info('Not all pr found after 100 pages: remaining: %s', pr_names) + break + for branch in self: + branch.target_branch_name = False + branch.pull_head_name = False + branch.pull_head_remote_id = False if branch.name: - branch.branch_name = branch.name.split('/')[-1] - pi = pull_info or branch._get_pull_info() + pi = branch.is_pr and (pull_info or pull_info_dict.get((branch.remote_id, branch.name)) or branch._get_pull_info()) if pi: - branch.target_branch_name = pi['base']['ref'] - branch.pull_head_name = pi['head']['label'] - else: - branch.branch_name = '' + try: + branch.target_branch_name = pi['base']['ref'] + branch.pull_head_name = pi['head']['label'] + pull_head_repo_name = False + if pi['head'].get('repo'): + pull_head_repo_name = pi['head']['repo'].get('full_name') + if pull_head_repo_name not in name_to_remote: + owner, repo_name = pull_head_repo_name.split('/') + name_to_remote[pull_head_repo_name] = self.env['runbot.remote'].search([('owner', '=', owner), ('repo_name', '=', repo_name)], limit=1) + branch.pull_head_remote_id = name_to_remote[pull_head_repo_name] + except (TypeError, AttributeError): + _logger.exception('Error for pr %s using pull_info %s', branch.name, pi) + raise - def recompute_infos(self): - """ public method to recompute infos on demand """ - self._get_branch_infos() - - @api.depends('branch_name') - def _get_branch_url(self): - """compute the branch url based on branch_name""" + @api.depends('name', 'remote_id.base_url', 'is_pr') + def _compute_branch_url(self): + """compute the branch url based on name""" for branch in self: if branch.name: - if re.match('^[0-9]+$', branch.branch_name): - branch.branch_url = "https://%s/pull/%s" % (branch.repo_id.base, branch.branch_name) + if branch.is_pr: + branch.branch_url = "https://%s/pull/%s" % (branch.remote_id.base_url, branch.name) else: - branch.branch_url = "https://%s/tree/%s" % (branch.repo_id.base, branch.branch_name) + branch.branch_url = "https://%s/tree/%s" % (branch.remote_id.base_url, branch.name) else: branch.branch_url = '' + @api.depends('reference_name', 'remote_id.repo_id.project_id') + def _compute_bundle_id(self): + dummy = self.env.ref('runbot.bundle_dummy') + for branch in self: + if branch.bundle_id == dummy: + continue + name = branch.reference_name + project = branch.remote_id.repo_id.project_id or self.env.ref('runbot.main_project') + project.ensure_one() + bundle = self.env['runbot.bundle'].search([('name', '=', name), ('project_id', '=', project.id)]) + need_new_base = not bundle and 
branch.match_is_base(name) + if (bundle.is_base or need_new_base) and branch.remote_id != branch.remote_id.repo_id.main_remote_id: + _logger.warning('Trying to add a dev branch to base bundle, falling back on dummy bundle') + bundle = dummy + elif name and branch.remote_id and branch.remote_id.repo_id._is_branch_forbidden(name): + _logger.warning('Trying to add a forbidden branch, falling back on dummy bundle') + bundle = dummy + elif bundle.is_base and branch.is_pr: + _logger.warning('Trying to add pr to base bundle, falling back on dummy bundle') + bundle = dummy + elif not bundle: + values = { + 'name': name, + 'project_id': project.id, + } + if need_new_base: + values['is_base'] = True + + if branch.is_pr and branch.target_branch_name: # most likely external_pr, use target as version + base = self.env['runbot.bundle'].search([ + ('name', '=', branch.target_branch_name), + ('is_base', '=', True), + ('project_id', '=', project.id) + ]) + if base: + values['defined_base_id'] = base.id + if name: + bundle = self.env['runbot.bundle'].create(values) # this prevent creating a branch in UI + branch.bundle_id = bundle + + @api.model_create_multi + def create(self, value_list): + branches = super().create(value_list) + for branch in branches: + if branch.head: + self.env['runbot.ref.log'].create({'commit_id': branch.head.id, 'branch_id': branch.id}) + return branches + + def write(self, values): + if 'head' in values: + head = self.head + super().write(values) + if 'head' in values and head != self.head: + self.env['runbot.ref.log'].create({'commit_id': self.head.id, 'branch_id': self.id}) + def _get_pull_info(self): self.ensure_one() - repo = self.repo_id - if repo.token and self.name.startswith('refs/pull/'): - pull_number = self.name[len('refs/pull/'):] - return repo._github('/repos/:owner/:repo/pulls/%s' % pull_number, ignore_errors=True) or {} + remote = self.remote_id + if self.is_pr: + _logger.info('Getting info for %s', self.name) + return remote._github('/repos/:owner/:repo/pulls/%s' % self.name, ignore_errors=False) or {} # TODO catch and send a managable exception return {} - def _is_on_remote(self): - # check that a branch still exists on remote - self.ensure_one() - branch = self - repo = branch.repo_id - try: - repo._git(['ls-remote', '-q', '--exit-code', repo.name, branch.name]) - except CalledProcessError: + def ref(self): + return 'refs/%s/%s/%s' % ( + self.remote_id.remote_name, + 'pull' if self.is_pr else 'heads', + self.name + ) + + def recompute_infos(self): + """ public method to recompute infos on demand """ + self._compute_branch_infos() + + @api.model + def match_is_base(self, name): + """match against is_base_regex ir.config_parameter""" + if not name: return False - return True + icp = self.env['ir.config_parameter'].sudo() + regex = icp.get_param('runbot.runbot_is_base_regex', False) + if regex: + return re.match(regex, name) - def _get_last_branch_name_builds(self): - # naive way to find corresponding build, only matching branch name or pr pull_head_name and target_branch_name. 
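As a side note on the new Branch helpers above, here is a rough standalone sketch of how ref() and match_is_base() behave. The regex value and the remote names are only illustrative assumptions, the real pattern lives in the runbot.runbot_is_base_regex ir.config_parameter.

import re

# hypothetical value of the 'runbot.runbot_is_base_regex' parameter, for illustration only
IS_BASE_REGEX = r'^(master)|(saas-)?\d+\.\d+$'

def is_base(name):
    # mirrors Branch.match_is_base(): base bundles (master, 13.0, saas-13.3, ...) are flagged as such
    return bool(re.match(IS_BASE_REGEX, name))

def git_ref(remote_name, is_pr, name):
    # mirrors Branch.ref(): PRs are fetched as pull refs, branches as heads, namespaced per remote
    return 'refs/%s/%s/%s' % (remote_name, 'pull' if is_pr else 'heads', name)

assert is_base('13.0') and is_base('saas-13.3') and not is_base('13.0-fix-stuff-xdo')
assert git_ref('odoo-dev', False, '13.0-fix-stuff-xdo') == 'refs/odoo-dev/heads/13.0-fix-stuff-xdo'
assert git_ref('odoo', True, '42') == 'refs/odoo/pull/42'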
- self.ensure_one() - domain = [] - if self.pull_head_name: - domain = [('pull_head_name', 'like', '%%:%s' % self.pull_head_name.split(':')[-1]), ('target_branch_name', '=', self.target_branch_name)] # pr matching pull head name - else: - domain = [('name', '=', self.name)] - #domain += [('id', '!=', self.branch_id.id)] - e = expression.expression(domain, self) - where_clause, where_params = e.to_sql() +class RefLog(models.Model): + _name = 'runbot.ref.log' + _description = 'Ref log' + _log_access = False - repo_ids = tuple(self.env['runbot.repo'].search([]).ids) # access rights - query = """ - SELECT max(b.id) - FROM runbot_build b - JOIN runbot_branch br ON br.id = b.branch_id - - WHERE b.branch_id IN ( - SELECT id from runbot_branch WHERE %s - ) - AND b.build_type IN ('normal', 'rebuild') - AND b.repo_id in %%s - AND (b.hidden = false OR b.hidden IS NULL) - AND b.parent_id IS NULL - AND (br.no_build = false OR br.no_build IS NULL) - GROUP BY b.repo_id - """ % where_clause - - self.env.cr.execute(query, where_params + [repo_ids]) - results = [r[0] for r in self.env.cr.fetchall()] - return self.env['runbot.build'].browse(results) - - @api.model_create_single - def create(self, vals): - if not vals.get('config_id') and ('use-coverage' in (vals.get('name') or '')): - coverage_config = self.env.ref('runbot.runbot_build_config_test_coverage', raise_if_not_found=False) - if coverage_config: - vals['config_id'] = coverage_config - - return super(runbot_branch, self).create(vals) - - def _get_last_coverage_build(self): - """ Return the last build with a coverage value > 0""" - self.ensure_one() - return self.env['runbot.build'].search([ - ('branch_id.id', '=', self.id), - ('local_state', 'in', ['done', 'running']), - ('coverage_result', '>=', 0.0), - ], order='sequence desc', limit=1) - - def _compute_coverage_result(self): - """ Compute the coverage result of the last build in branch """ - for branch in self: - last_build = branch._get_last_coverage_build() - branch.coverage_result = last_build.coverage_result or 0.0 - - def _get_closest_branch(self, target_repo_id): - """ - Return branch id of the closest branch based on name or pr informations. - """ - self.ensure_one() - Branch = self.env['runbot.branch'] - - repo = self.repo_id - name = self.pull_head_name or self.branch_name - - target_repo = self.env['runbot.repo'].browse(target_repo_id) - - target_repo_ids = [target_repo.id] - r = target_repo.duplicate_id - while r: - if r.id in target_repo_ids: - break - target_repo_ids.append(r.id) - r = r.duplicate_id - - _logger.debug('Search closest of %s (%s) in repos %r', name, repo.name, target_repo_ids) - - def sort_by_repo(branch): - return ( - not branch.sticky, # sticky first - target_repo_ids.index(branch.repo_id[0].id), - -1 * len(branch.branch_name), # little change of logic here, was only sorted on branch_name in prefix matching case before - -1 * branch.id - ) - - # 1. same name, not a PR - if not self.pull_head_name: # not a pr - domain = [ - ('repo_id', 'in', target_repo_ids), - ('branch_name', '=', self.branch_name), - ('name', '=like', 'refs/heads/%'), - ] - targets = Branch.search(domain, order='id DESC') - targets = sorted(targets, key=sort_by_repo) - if targets and targets[0]._is_on_remote(): - return (targets[0], 'exact') - - # 2. 
PR with head name equals - if self.pull_head_name: - domain = [ - ('repo_id', 'in', target_repo_ids), - ('pull_head_name', '=', self.pull_head_name), - ('name', '=like', 'refs/pull/%'), - ] - pulls = Branch.search(domain, order='id DESC') - pulls = sorted(pulls, key=sort_by_repo) - for pull in Branch.browse([pu['id'] for pu in pulls]): - pi = pull._get_pull_info() - if pi.get('state') == 'open': - if ':' in self.pull_head_name: - (repo_name, pr_branch_name) = self.pull_head_name.split(':') - repo = self.env['runbot.repo'].browse(target_repo_ids).filtered(lambda r: ':%s/' % repo_name in r.name) - # most of the time repo will be pull.repo_id.duplicate_id, but it is still possible to have a pr pointing the same repo - if repo: - pr_branch_ref = 'refs/heads/%s' % pr_branch_name - pr_branch = self._get_or_create_branch(repo.id, pr_branch_ref) - # use _get_or_create_branch in case a pr is scanned before pull_head_name branch. - return (pr_branch, 'exact PR') - return (pull, 'exact PR') - - # 4.Match a PR in enterprise without community PR - # Moved before 3 because it makes more sense - if self.pull_head_name: - if self.name.startswith('refs/pull'): - if ':' in self.pull_head_name: - (repo_name, pr_branch_name) = self.pull_head_name.split(':') - repos = self.env['runbot.repo'].browse(target_repo_ids).filtered(lambda r: ':%s/' % repo_name in r.name) - else: - pr_branch_name = self.pull_head_name - repos = target_repo - if repos: - duplicate_branch_name = 'refs/heads/%s' % pr_branch_name - domain = [ - ('repo_id', 'in', tuple(repos.ids)), - ('branch_name', '=', pr_branch_name), - ('pull_head_name', '=', False), - ] - targets = Branch.search(domain, order='id DESC') - targets = sorted(targets, key=sort_by_repo) - if targets and targets[0]._is_on_remote(): - return (targets[0], 'no PR') - - # 3. Match a branch which is the dashed-prefix of current branch name - if not self.pull_head_name: - if '-' in self.branch_name: - name_start = 'refs/heads/%s' % self.branch_name.split('-')[0] - domain = [('repo_id', 'in', target_repo_ids), ('name', '=like', '%s%%' % name_start)] - branches = Branch.search(domain, order='id DESC') - branches = sorted(branches, key=sort_by_repo) - for branch in branches: - if self.branch_name.startswith('%s-' % branch.branch_name) and branch._is_on_remote(): - return (branch, 'prefix') - - # 5. 
last-resort value - if self.target_branch_name: - default_target_ref = 'refs/heads/%s' % self.target_branch_name - default_branch = self.search([('repo_id', 'in', target_repo_ids), ('name', '=', default_target_ref)], limit=1) - if default_branch: - return (default_branch, 'pr_target') - - default_target_ref = 'refs/heads/master' - default_branch = self.search([('repo_id', 'in', target_repo_ids), ('name', '=', default_target_ref)], limit=1) - # we assume that master will always exists - return (default_branch, 'default') - - def _branch_exists(self, branch_id): - Branch = self.env['runbot.branch'] - branch = Branch.search([('id', '=', branch_id)]) - if branch and branch[0]._is_on_remote(): - return True - return False - - def _get_or_create_branch(self, repo_id, name): - res = self.search([('repo_id', '=', repo_id), ('name', '=', name)], limit=1) - if res: - return res - _logger.warning('creating missing branch %s', name) - Branch = self.env['runbot.branch'] - branch = Branch.create({'repo_id': repo_id, 'name': name}) - return branch - - def toggle_request_branch_rebuild(self): - for branch in self: - if not branch.rebuild_requested: - branch.rebuild_requested = True - branch.repo_id.sudo().set_hook_time(time.time()) - else: - branch.rebuild_requested = False + commit_id = fields.Many2one('runbot.commit', index=True) + branch_id = fields.Many2one('runbot.branch', index=True) + date = fields.Datetime(default=fields.Datetime.now) diff --git a/runbot/models/build.py b/runbot/models/build.py index 001a595c..6660f14b 100644 --- a/runbot/models/build.py +++ b/runbot/models/build.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- import fnmatch -import glob import logging import pwd import re @@ -8,14 +7,15 @@ import shutil import subprocess import time import datetime -from ..common import dt2time, fqdn, now, grep, local_pgadmin_cursor, s2human, Commit, dest_reg, os, list_local_dbs, pseudo_markdown -from ..container import docker_build, docker_stop, docker_state, Command +import hashlib +from ..common import dt2time, fqdn, now, grep, local_pgadmin_cursor, s2human, dest_reg, os, list_local_dbs, pseudo_markdown, RunbotException +from ..container import docker_stop, docker_state, Command from ..fields import JsonDictField -from odoo.addons.runbot.models.repo import RunbotException -from odoo import models, fields, api, registry +from odoo import models, fields, api from odoo.exceptions import UserError, ValidationError from odoo.http import request from odoo.tools import appdirs +from odoo.tools.safe_eval import safe_eval from collections import defaultdict from psycopg2 import sql from subprocess import CalledProcessError @@ -23,152 +23,209 @@ from subprocess import CalledProcessError _logger = logging.getLogger(__name__) result_order = ['ok', 'warn', 'ko', 'skipped', 'killed', 'manually_killed'] -state_order = ['pending', 'testing', 'waiting', 'running', 'duplicate', 'done'] +state_order = ['pending', 'testing', 'waiting', 'running', 'done'] COPY_WHITELIST = [ - "branch_id", - "repo_id", - "name", + "params_id", "description", - "date", - "author", - "author_email", - "committer", - "committer_email", - "subject", - "config_data", - "extra_params", "build_type", "parent_id", - "hidden", - "dependency_ids", - "config_id", "orphan_result", - "commit_path_mode", ] + def make_selection(array): - def format(string): - return (string, string.replace('_', ' ').capitalize()) - return [format(elem) if isinstance(elem, str) else elem for elem in array] + return [(elem, elem.replace('_', ' ').capitalize()) if 
isinstance(elem, str) else elem for elem in array]


-class runbot_build(models.Model):
-    _name = "runbot.build"
+class BuildParameters(models.Model):
+    _name = 'runbot.build.params'
+    _description = "All information used by a build to run, should be unique and set on create only"
+
+    # on param or on build?
+    # execution parameters
+    commit_link_ids = fields.Many2many('runbot.commit.link', copy=True)
+    commit_ids = fields.Many2many('runbot.commit', compute='_compute_commit_ids')
+    version_id = fields.Many2one('runbot.version', required=True, index=True)
+    project_id = fields.Many2one('runbot.project', required=True, index=True)  # for access rights
+    trigger_id = fields.Many2one('runbot.trigger', index=True)  # for access rights
+    category = fields.Char('Category', index=True)  # normal vs nightly vs weekly, ...
+    # other information
+    extra_params = fields.Char('Extra cmd args')
+    config_id = fields.Many2one('runbot.build.config', 'Run Config', required=True,
+                                default=lambda self: self.env.ref('runbot.runbot_build_config_default', raise_if_not_found=False), index=True)
+    config_data = JsonDictField('Config Data')
+
+    build_ids = fields.One2many('runbot.build', 'params_id')
+    builds_reference_ids = fields.Many2many('runbot.build', relation='runbot_build_params_references', copy=True)
+    modules = fields.Char('Modules')
+
+    upgrade_to_build_id = fields.Many2one('runbot.build', index=True)  # used to define sources to use with upgrade script
+    upgrade_from_build_id = fields.Many2one('runbot.build', index=True)  # used to download db
+    dump_db = fields.Many2one('runbot.database', index=True)  # used to define db to download
+
+    fingerprint = fields.Char('Fingerprint', compute='_compute_fingerprint', store=True, index=True)
+
+    _sql_constraints = [
+        ('unique_fingerprint', 'unique (fingerprint)', 'avoid duplicate params'),
+    ]
+
+    # @api.depends('version_id', 'project_id', 'extra_params', 'config_id', 'config_data', 'modules', 'commit_link_ids', 'builds_reference_ids')
+    def _compute_fingerprint(self):
+        for param in self:
+            cleaned_vals = {
+                'version_id': param.version_id.id,
+                'project_id': param.project_id.id,
+                'trigger_id': param.trigger_id.id,
+                'extra_params': param.extra_params or '',
+                'config_id': param.config_id.id,
+                'config_data': param.config_data.dict,
+                'modules': param.modules or '',
+                'commit_link_ids': sorted(param.commit_link_ids.commit_id.ids),
+                'builds_reference_ids': sorted(param.builds_reference_ids.ids),
+                'upgrade_from_build_id': param.upgrade_from_build_id.id,
+                'upgrade_to_build_id': param.upgrade_to_build_id.id,
+                'dump_db': param.dump_db.id,
+            }
+            param.fingerprint = hashlib.sha256(str(cleaned_vals).encode('utf8')).hexdigest()
+
+    @api.depends('commit_link_ids')
+    def _compute_commit_ids(self):
+        for params in self:
+            params.commit_ids = params.commit_link_ids.commit_id
+
+    def create(self, values):
+        params = self.new(values)
+        match = self._find_existing(params.fingerprint)
+        if match:
+            return match
+        values = self._convert_to_write(params._cache)
+        return super().create(values)
+
+    def _find_existing(self, fingerprint):
+        return self.env['runbot.build.params'].search([('fingerprint', '=', fingerprint)], limit=1)
+
+    def write(self, vals):
+        raise UserError('Params cannot be modified')
+
+
+class BuildResult(models.Model):
+    # remove duplicate management
+    # instead, link between bundle_batch and build
+    # kill -> only available from bundle.
+ # kill -> actually detach the build from the bundle + # rebuild: detach and create a new link (a little like exact rebuild), + # if a build is detached from all bundle, kill it + # nigktly? + + _name = 'runbot.build' _description = "Build" _parent_store = True _order = 'id desc' _rec_name = 'id' - branch_id = fields.Many2one('runbot.branch', 'Branch', required=True, ondelete='cascade', index=True) - repo_id = fields.Many2one(related='branch_id.repo_id', readonly=True, store=True) - name = fields.Char('Revno', required=True) + # all displayed info removed. How to replace that? + # -> commit corresponding to repo of trigger_id5 + # -> display all? + + params_id = fields.Many2one('runbot.build.params', required=True, index=True, auto_join=True) + no_auto_run = fields.Boolean('No run') + # could be a default value, but possible to change it to allow duplicate accros branches + description = fields.Char('Description', help='Informative description') md_description = fields.Char(compute='_compute_md_description', String='MD Parsed Description', help='Informative description markdown parsed') - host = fields.Char('Host') - port = fields.Integer('Port') - dest = fields.Char(compute='_compute_dest', type='char', string='Dest', readonly=1, store=True) - domain = fields.Char(compute='_compute_domain', type='char', string='URL') - date = fields.Datetime('Commit date') - author = fields.Char('Author') - author_email = fields.Char('Author Email') - committer = fields.Char('Committer') - committer_email = fields.Char('Committer Email') - subject = fields.Text('Subject') - sequence = fields.Integer('Sequence') - log_ids = fields.One2many('ir.logging', 'build_id', string='Logs') - error_log_ids = fields.One2many('ir.logging', 'build_id', domain=[('level', 'in', ['WARNING', 'ERROR', 'CRITICAL'])], string='Error Logs') - config_data = JsonDictField('Config Data') - stat_ids = fields.One2many('runbot.build.stat', 'build_id', strings='Statistics values') + + # Related fields for convenience + version_id = fields.Many2one('runbot.version', related='params_id.version_id', store=True, index=True) + config_id = fields.Many2one('runbot.build.config', related='params_id.config_id', store=True, index=True) + trigger_id = fields.Many2one('runbot.trigger', related='params_id.trigger_id', store=True, index=True) # state machine - global_state = fields.Selection(make_selection(state_order), string='Status', compute='_compute_global_state', store=True) local_state = fields.Selection(make_selection(state_order), string='Build Status', default='pending', required=True, index=True) global_result = fields.Selection(make_selection(result_order), string='Result', compute='_compute_global_result', store=True) local_result = fields.Selection(make_selection(result_order), string='Build Result') triggered_result = fields.Selection(make_selection(result_order), string='Triggered Result') # triggered by db only - last_github_state = fields.Char('Last github status', readonly=True) requested_action = fields.Selection([('wake_up', 'To wake up'), ('deathrow', 'To kill')], string='Action requested', index=True) + # web infos + host = fields.Char('Host') + port = fields.Integer('Port') + dest = fields.Char(compute='_compute_dest', type='char', string='Dest', readonly=1, store=True) + domain = fields.Char(compute='_compute_domain', type='char', string='URL') + # logs and stats + log_ids = fields.One2many('ir.logging', 'build_id', string='Logs') + error_log_ids = fields.One2many('ir.logging', 'build_id', domain=[('level', 'in', 
['WARNING', 'ERROR', 'CRITICAL'])], string='Error Logs')
+    stat_ids = fields.One2many('runbot.build.stat', 'build_id', string='Statistics values')
+    log_list = fields.Char('Comma separated list of step_ids names with logs', compute="_compute_log_list", store=True)

-    nb_pending = fields.Integer("Number of pending in queue", default=0)
-    nb_testing = fields.Integer("Number of test slot use", default=0)
-    nb_running = fields.Integer("Number of run slot use", default=0)
-
-    # should we add a stored field for children results?
     active_step = fields.Many2one('runbot.build.config.step', 'Active step')
     job = fields.Char('Active step display name', compute='_compute_job')
     job_start = fields.Datetime('Job start')
     job_end = fields.Datetime('Job end')
-    gc_date = fields.Datetime('Local cleanup date', compute='_compute_gc_date')
-    gc_delay = fields.Integer('Cleanup Delay', help='Used to compute gc_date')
     build_start = fields.Datetime('Build start')
     build_end = fields.Datetime('Build end')
     job_time = fields.Integer(compute='_compute_job_time', string='Job time')
     build_time = fields.Integer(compute='_compute_build_time', string='Build time')
+
+    gc_date = fields.Datetime('Local cleanup date', compute='_compute_gc_date')
+    gc_delay = fields.Integer('Cleanup Delay', help='Used to compute gc_date')
+
     build_age = fields.Integer(compute='_compute_build_age', string='Build age')
-    duplicate_id = fields.Many2one('runbot.build', 'Corresponding Build', index=True)
-    revdep_build_ids = fields.Many2many('runbot.build', 'runbot_rev_dep_builds',
-                                        column1='rev_dep_id', column2='dependent_id',
-                                        string='Builds that depends on this build')
-    extra_params = fields.Char('Extra cmd args')
+
     coverage = fields.Boolean('Code coverage was computed for this build')
     coverage_result = fields.Float('Coverage result', digits=(5, 2))
     build_type = fields.Selection([('scheduled', 'This build was automatically scheduled'),
                                    ('rebuild', 'This build is a rebuild'),
                                    ('normal', 'normal build'),
-                                   ('indirect', 'Automatic rebuild'),
+                                   ('indirect', 'Automatic rebuild'),  # TODO cleanup remove
                                    ],
                                   default='normal',
                                   string='Build type')
+
+    # what about parent_id and duplicates?
+    # -> always create build, no duplicate? (makes sense since duplicate should be the parent and params should be inherited)
+    # -> build_link ?
+
     parent_id = fields.Many2one('runbot.build', 'Parent Build', index=True)
     parent_path = fields.Char('Parent path', index=True)
     # should we add a has children stored boolean?
-    hidden = fields.Boolean("Don't show build on main page", default=False)  # index?
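Going back to the runbot.build.params model above: its create() returns an existing record when the fingerprint already exists, so identical parameters are shared between builds instead of duplicated. A toy sketch of that idea, using a plain dict in place of the ORM and the unique index (assumption: only the hashed values matter for identity):

import hashlib

_params_by_fingerprint = {}  # stand-in for the stored fingerprint plus unique constraint

def _fingerprint(values):
    # hash a normalised view of the significant values, like _compute_fingerprint does
    return hashlib.sha256(str(sorted(values.items())).encode('utf8')).hexdigest()

def create_params(values):
    fp = _fingerprint(values)
    if fp in _params_by_fingerprint:  # plays the role of _find_existing()
        return _params_by_fingerprint[fp]
    record = dict(values, fingerprint=fp)
    _params_by_fingerprint[fp] = record
    return record

a = create_params({'version_id': 42, 'config_id': 1, 'modules': '', 'commit_ids': (101, 102)})
b = create_params({'version_id': 42, 'config_id': 1, 'modules': '', 'commit_ids': (101, 102)})
assert a is b  # same fingerprint, same params record, no duplicate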
children_ids = fields.One2many('runbot.build', 'parent_id') - dependency_ids = fields.One2many('runbot.build.dependency', 'build_id', copy=True) - config_id = fields.Many2one('runbot.build.config', 'Run Config', required=True, default=lambda self: self.env.ref('runbot.runbot_build_config_default', raise_if_not_found=False)) - real_build = fields.Many2one('runbot.build', 'Real Build', help="duplicate_id or self", compute='_compute_real_build') - log_list = fields.Char('Comma separted list of step_ids names with logs', compute="_compute_log_list", store=True) + # config of top_build is inherithed from params, but subbuild will have different configs + orphan_result = fields.Boolean('No effect on the parent result', default=False) - commit_path_mode = fields.Selection([('rep_sha', 'repo name + sha'), - ('soft', 'repo name only'), - ], - default='soft', - string='Source export path mode') build_url = fields.Char('Build url', compute='_compute_build_url', store=False) build_error_ids = fields.Many2many('runbot.build.error', 'runbot_build_error_ids_runbot_build_rel', string='Errors') keep_running = fields.Boolean('Keep running', help='Keep running') log_counter = fields.Integer('Log Lines counter', default=100) - @api.depends('config_id') + slot_ids = fields.One2many('runbot.batch.slot', 'build_id') + killable = fields.Boolean('Killable') + + database_ids = fields.One2many('runbot.database', 'build_id') + + @api.depends('params_id.config_id') def _compute_log_list(self): # storing this field because it will be access trhoug repo viewn and keep track of the list at create for build in self: - build.log_list = ','.join({step.name for step in build.config_id.step_ids() if step._has_log()}) + build.log_list = ','.join({step.name for step in build.params_id.config_id.step_ids() if step._has_log()}) + # should be moved - @api.depends('children_ids.global_state', 'local_state', 'duplicate_id.global_state') + @api.depends('children_ids.global_state', 'local_state') def _compute_global_state(self): for record in self: - if record.duplicate_id: - record.global_state = record.duplicate_id.global_state - elif record.global_state == 'done' and self.local_state == 'done': - # avoid to recompute if done, mostly important whith many orphan childrens - record.global_state = 'done' - else: - waiting_score = record._get_state_score('waiting') - children_ids = [child for child in record.children_ids if not child.orphan_result] - if record._get_state_score(record.local_state) > waiting_score and children_ids: # if finish, check children - children_state = record._get_youngest_state([child.global_state for child in children_ids]) - if record._get_state_score(children_state) > waiting_score: - record.global_state = record.local_state - else: - record.global_state = 'waiting' - else: + waiting_score = record._get_state_score('waiting') + children_ids = [child for child in record.children_ids if not child.orphan_result] + if record._get_state_score(record.local_state) > waiting_score and children_ids: # if finish, check children + children_state = record._get_youngest_state([child.global_state for child in children_ids]) + if record._get_state_score(children_state) > waiting_score: record.global_state = record.local_state + else: + record.global_state = 'waiting' + else: + record.global_state = record.local_state @api.depends('gc_delay', 'job_end') def _compute_gc_date(self): @@ -200,14 +257,10 @@ class runbot_build(models.Model): def _get_state_score(self, result): return state_order.index(result) - # random note: need 
to count hidden in pending and testing build displayed in frontend - - @api.depends('children_ids.global_result', 'local_result', 'duplicate_id.global_result', 'children_ids.orphan_result') + @api.depends('children_ids.global_result', 'local_result', 'children_ids.orphan_result') def _compute_global_result(self): for record in self: - if record.duplicate_id: - record.global_result = record.duplicate_id.global_result - elif record.local_result and record._get_result_score(record.local_result) >= record._get_result_score('ko'): + if record.local_result and record._get_result_score(record.local_result) >= record._get_result_score('ko'): record.global_result = record.local_result else: children_ids = [child for child in record.children_ids if not child.orphan_result] @@ -230,139 +283,21 @@ class runbot_build(models.Model): def _get_result_score(self, result): return result_order.index(result) - @api.depends('active_step', 'duplicate_id.active_step') + @api.depends('active_step') def _compute_job(self): for build in self: - build.job = build.real_build.active_step.name - - @api.depends('duplicate_id') - def _compute_real_build(self): - for build in self: - build.real_build = build.duplicate_id or build + build.job = build.active_step.name def copy_data(self, default=None): - values = super().copy_data(default)[0] - values = {key: value for key, value in values.items() if key in COPY_WHITELIST} + values = super().copy_data(default)[0] or {} + default = dict(default or []) + values = {key: value for key, value in values.items() if (key in COPY_WHITELIST or key in default)} values.update({ - 'host': 'PAUSED', # hack to keep the build in pending waiting for a manual update. Todo: add a paused state instead + 'host': 'PAUSED', # hack to keep the build in pending waiting for a manual update. Todo: add a paused flag instead 'local_state': 'pending', }) return [values] - def copy(self, default=None): - return super(runbot_build, self.with_context(force_rebuild=True)).copy(default) - - - @api.model_create_single - def create(self, vals): - if not 'config_id' in vals: - branch = self.env['runbot.branch'].browse(vals.get('branch_id')) - vals['config_id'] = branch.config_id.id - build_id = super(runbot_build, self).create(vals) - extra_info = {} - if not build_id.sequence: - extra_info['sequence'] = build_id.id - - # compute dependencies - repo = build_id.repo_id - dep_create_vals = [] - build_id._log('create', 'Build created') # mainly usefull to log creation time - if not vals.get('dependency_ids'): - params = build_id._get_params() # calling git show, dont call that if not usefull. - for extra_repo in repo.dependency_ids: - repo_name = extra_repo.short_name - last_commit = params['dep'][repo_name] # not name - if last_commit: - match_type = 'params' - build_closest_branch = False - message = 'Dependency for repo %s defined in commit message' % (repo_name) - else: - (build_closest_branch, match_type) = build_id.branch_id._get_closest_branch(extra_repo.id) - closest_name = build_closest_branch.name - closest_branch_repo = build_closest_branch.repo_id - last_commit = closest_branch_repo._git_rev_parse(closest_name) - message = 'Dependency for repo %s defined from closest branch %s' % (repo_name, closest_name) - try: - commit_oneline = extra_repo._git(['show', '--pretty="%H -- %s"', '-s', last_commit]).strip() - except CalledProcessError: - commit_oneline = 'Commit %s not found on build creation' % last_commit - # possible that oneline fail if given from commit message. Do it on build? 
or keep this information - - build_id._log('create', '%s: %s' % (message, commit_oneline)) - - dep_create_vals.append({ - 'build_id': build_id.id, - 'dependecy_repo_id': extra_repo.id, - 'closest_branch_id': build_closest_branch and build_closest_branch.id, - 'dependency_hash': last_commit, - 'match_type': match_type, - }) - for dep_vals in dep_create_vals: - self.env['runbot.build.dependency'].sudo().create(dep_vals) - - if not self.env.context.get('force_rebuild') and not vals.get('build_type') == 'rebuild': - # detect duplicate - duplicate_id = None - domain = [ - ('repo_id', 'in', (build_id.repo_id.duplicate_id.id, build_id.repo_id.id)), # before, was only looking in repo.duplicate_id looks a little better to search in both - ('id', '!=', build_id.id), - ('name', '=', build_id.name), - ('duplicate_id', '=', False), - # ('build_type', '!=', 'indirect'), # in case of performance issue, this little fix may improve performance a little but less duplicate will be detected when pushing an empty branch on repo with duplicates - '|', ('local_result', '=', False), ('local_result', '!=', 'skipped'), # had to reintroduce False posibility for selections - ('config_id', '=', build_id.config_id.id), - ('extra_params', '=', build_id.extra_params), - ('config_data', '=', build_id.config_data or False), - ] - candidates = self.search(domain) - - nb_deps = len(build_id.dependency_ids) - if candidates and nb_deps: - # check that all depedencies are matching. - - # Note: We avoid to compare closest_branch_id, because the same hash could be found on - # 2 different branches (pr + branch). - # But we may want to ensure that the hash is comming from the right repo, we dont want to compare community - # hash with enterprise hash. - # this is unlikely to happen so branch comparaison is disabled - self.env.cr.execute(""" - SELECT DUPLIDEPS.build_id - FROM runbot_build_dependency as DUPLIDEPS - JOIN runbot_build_dependency as BUILDDEPS - ON BUILDDEPS.dependency_hash = DUPLIDEPS.dependency_hash - AND BUILDDEPS.build_id = %s - AND DUPLIDEPS.build_id in %s - GROUP BY DUPLIDEPS.build_id - HAVING COUNT(DUPLIDEPS.*) = %s - ORDER BY DUPLIDEPS.build_id -- remove this in case of performance issue, not so usefull - LIMIT 1 - """, (build_id.id, tuple(candidates.ids), nb_deps)) - filtered_candidates_ids = self.env.cr.fetchall() - - if filtered_candidates_ids: - duplicate_id = filtered_candidates_ids[0] - else: - duplicate_id = candidates[0].id if candidates else False - - if duplicate_id: - extra_info.update({'local_state': 'duplicate', 'duplicate_id': duplicate_id}) - # maybe update duplicate priority if needed - - docker_source_folders = set() - for commit in build_id._get_all_commit(): - docker_source_folder = build_id._docker_source_folder(commit) - if docker_source_folder in docker_source_folders: - extra_info['commit_path_mode'] = 'rep_sha' - continue - docker_source_folders.add(docker_source_folder) - - if extra_info: - build_id.write(extra_info) - - if build_id.local_state == 'duplicate' and build_id.duplicate_id.global_state in ('running', 'done'): - build_id._github_status() - return build_id - def write(self, values): # some validation to ensure db consistency if 'local_state' in values: @@ -372,35 +307,57 @@ class runbot_build(models.Model): local_result = values.get('local_result') for build in self: assert not local_result or local_result == self._get_worst_result([build.local_result, local_result]) # dont write ok on a warn/error build - res = super(runbot_build, self).write(values) - for build in self: - 
assert bool(not build.duplicate_id) ^ (build.local_state == 'duplicate') # don't change duplicate state without removing duplicate id. - if 'log_counter' in values: # not 100% usefull but more correct ( see test_ir_logging) + res = super(BuildResult, self).write(values) + if 'log_counter' in values: # not 100% usefull but more correct ( see test_ir_logging) self.flush() return res + def _add_child(self, param_values, orphan=False, description=False, additionnal_commit_links=False): + if additionnal_commit_links: + commit_link_ids = self.params_id.commit_link_ids + commit_link_ids |= additionnal_commit_links + param_values['commit_link_ids'] = commit_link_ids + return self.create({ + 'params_id': self.params_id.copy(param_values).id, + 'parent_id': self.id, + 'build_type': self.build_type, + 'description': description, + 'orphan_result': orphan, + }) + + def result_multi(self): + if all(build.global_result == 'ok' or not build.global_result for build in self): + return 'ok' + if any(build.global_result in ('skipped', 'killed', 'manually_killed') for build in self): + return 'killed' + if any(build.global_result == 'ko' for build in self): + return 'ko' + if any(build.global_result == 'warning' for build in self): + return 'warning' + return 'ko' # ? + def update_build_end(self): for build in self: build.build_end = now() if build.parent_id and build.parent_id.local_state in ('running', 'done'): - build.parent_id.update_build_end() + build.parent_id.update_build_end() - @api.depends('name', 'branch_id.name') + @api.depends('params_id.version_id.name') def _compute_dest(self): for build in self: - if build.name: - nickname = build.branch_id.name.split('/')[2] + if build.id: + nickname = build.params_id.version_id.name nickname = re.sub(r'"|\'|~|\:', '', nickname) nickname = re.sub(r'_|/|\.', '-', nickname) - build.dest = ("%05d-%s-%s" % (build.id or 0, nickname[:32], build.name[:6])).lower() + build.dest = ("%05d-%s" % (build.id or 0, nickname[:32])).lower() - @api.depends('repo_id', 'port', 'dest', 'host', 'duplicate_id.domain') + @api.depends('port', 'dest', 'host') def _compute_domain(self): - domain = self.env['ir.config_parameter'].sudo().get_param('runbot.runbot_domain', fqdn()) + icp = self.env['ir.config_parameter'].sudo() + nginx = icp.get_param('runbot.runbot_nginx', False) # or just force nginx? 
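For readability, a standalone approximation of the global state logic above (assumptions: _get_youngest_state picks the least advanced state, and orphan children are already filtered out):

state_order = ['pending', 'testing', 'waiting', 'running', 'done']

def global_state(local_state, children_states):
    # a build that finished its own jobs stays 'waiting' until all children got past 'waiting' too
    score = state_order.index
    if children_states and score(local_state) > score('waiting'):
        youngest = min(children_states, key=score)
        return local_state if score(youngest) > score('waiting') else 'waiting'
    return local_state

assert global_state('done', []) == 'done'
assert global_state('done', ['testing', 'done']) == 'waiting'  # a child is still testing
assert global_state('done', ['running', 'done']) == 'done'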
+ domain = icp.get_param('runbot.runbot_domain', fqdn()) for build in self: - if build.duplicate_id: - build.domain = build.duplicate_id.domain - elif build.repo_id.nginx: + if nginx: build.domain = "%s.%s" % (build.dest, build.host) else: build.domain = "%s:%s" % (domain, build.port) @@ -409,42 +366,37 @@ class runbot_build(models.Model): for build in self: build.build_url = "/runbot/build/%s" % build.id - @api.depends('job_start', 'job_end', 'duplicate_id.job_time') + @api.depends('job_start', 'job_end') def _compute_job_time(self): """Return the time taken by the tests""" for build in self: - if build.duplicate_id: - build.job_time = build.duplicate_id.job_time - elif build.job_end and build.job_start: + if build.job_end and build.job_start: build.job_time = int(dt2time(build.job_end) - dt2time(build.job_start)) elif build.job_start: build.job_time = int(time.time() - dt2time(build.job_start)) else: build.job_time = 0 - @api.depends('build_start', 'build_end', 'global_state', 'duplicate_id.build_time') + @api.depends('build_start', 'build_end', 'global_state') def _compute_build_time(self): for build in self: - if build.duplicate_id: - build.build_time = build.duplicate_id.build_time - elif build.build_end and build.global_state != 'waiting': + if build.build_end and build.global_state != 'waiting': build.build_time = int(dt2time(build.build_end) - dt2time(build.build_start)) elif build.build_start: build.build_time = int(time.time() - dt2time(build.build_start)) else: build.build_time = 0 - @api.depends('job_start', 'duplicate_id.build_age') + @api.depends('job_start') def _compute_build_age(self): """Return the time between job start and now""" for build in self: - if build.duplicate_id: - build.build_age = build.duplicate_id.build_age - elif build.job_start: + if build.job_start: build.build_age = int(time.time() - dt2time(build.build_start)) else: build.build_age = 0 + # TODO move this logic to batch: use param to check consistency of found commits def _get_params(self): try: message = self.repo_id._git(['show', '-s', self.name]) @@ -458,65 +410,45 @@ class runbot_build(models.Model): params['dep'][result[0]] = result[1] return params - def _copy_dependency_ids(self): - return [(0, 0, { - 'match_type': dep.match_type, - 'closest_branch_id': dep.closest_branch_id and dep.closest_branch_id.id, - 'dependency_hash': dep.dependency_hash, - 'dependecy_repo_id': dep.dependecy_repo_id.id, - }) for dep in self.dependency_ids] + def _rebuild(self, message=None): + """Force a rebuild and return a recordset of builds""" + self.ensure_one() + # TODO don't rebuild if there is a more recent build for this params? 
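A quick illustration of the new naming scheme from _compute_dest and _compute_domain above; the host and domain values below are made up for the example:

import re

def build_dest(build_id, version_name):
    # mirrors _compute_dest: zero padded id plus a sanitised version nickname
    nickname = re.sub(r'"|\'|~|\:', '', version_name)
    nickname = re.sub(r'_|/|\.', '-', nickname)
    return ('%05d-%s' % (build_id, nickname[:32])).lower()

def build_domain(dest, host, use_nginx, runbot_domain, port):
    # mirrors _compute_domain: one subdomain per build behind nginx, host:port otherwise
    return '%s.%s' % (dest, host) if use_nginx else '%s:%s' % (runbot_domain, port)

assert build_dest(12345, 'saas-13.3') == '12345-saas-13-3'
assert build_domain('12345-saas-13-3', 'runbot42.example.com', True, 'runbot.example.com', 2042) == '12345-saas-13-3.runbot42.example.com'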
+ values = { + 'params_id': self.params_id.id, + 'build_type': 'rebuild', + } + if self.parent_id: + values.update({ + 'parent_id': self.parent_id.id, + 'description': self.description, + }) + self.orphan_result = True - def _force(self, message=None, exact=False): - """Force a rebuild and return a recordset of forced builds""" - forced_builds = self.env['runbot.build'] - for build in self: - pending_ids = self.search([('local_state', '=', 'pending')], order='id', limit=1) - if pending_ids: - sequence = pending_ids[0].id - else: - sequence = self.search([], order='id desc', limit=1)[0].id - # Force it now - if build.local_state in ['running', 'done', 'duplicate']: - values = { - 'sequence': sequence, - 'branch_id': build.branch_id.id, - 'name': build.name, - 'date': build.date, - 'author': build.author, - 'author_email': build.author_email, - 'committer': build.committer, - 'committer_email': build.committer_email, - 'subject': build.subject, - 'build_type': 'rebuild', - } - if exact: - values.update({ - 'config_id': build.config_id.id, - 'extra_params': build.extra_params, - 'config_data': build.config_data, - 'orphan_result': build.orphan_result, - 'dependency_ids': build._copy_dependency_ids(), - 'description': build.description, - }) - # if replace: ? - if build.parent_id: - values.update({ - 'parent_id': build.parent_id.id, # attach it to parent - 'hidden': build.hidden, - }) - build.orphan_result = True # set result of build as orphan + new_build = self.create(values) + if self.parent_id: + new_build._github_status() + user = request.env.user if request else self.env.user + new_build._log('rebuild', 'Rebuild initiated by %s%s' % (user.name, (' :%s' % message) if message else '')) - new_build = build.with_context(force_rebuild=True).create(values) - forced_builds |= new_build - user = request.env.user if request else self.env.user - new_build._log('rebuild', 'Rebuild initiated by %s (%s)%s' % (user.name, 'exact' if exact else 'default', (' :%s' % message) if message else '')) - return forced_builds + if self.local_state != 'done': + self._ask_kill('Killed by rebuild requested by %s (%s) (new build:%s)' % (user.name, user.id, new_build.id)) + + if not self.parent_id: + slots = self.env['runbot.batch.slot'].search([('build_id', '=', self.id)]) + for slot in slots: + slot.copy({ + 'build_id': new_build.id, + 'link_type': 'rebuild', + }) + slot.active = False + return new_build def _skip(self, reason=None): """Mark builds ids as skipped""" if reason: self._logger('skip %s', reason) - self.write({'local_state': 'done', 'local_result': 'skipped', 'duplicate_id': False}) + self.write({'local_state': 'done', 'local_result': 'skipped'}) def _build_from_dest(self, dest): if dest_reg.match(dest): @@ -527,7 +459,7 @@ class runbot_build(models.Model): dest_by_builds_ids = defaultdict(list) ignored = set() icp = self.env['ir.config_parameter'] - hide_in_logs = icp.get_param('runbot.runbot_db_template', default='template1') + hide_in_logs = icp.get_param('runbot.runbot_db_template', default='template0') for dest in dest_list: build = self._build_from_dest(dest) @@ -536,20 +468,20 @@ class runbot_build(models.Model): elif dest != hide_in_logs: ignored.add(dest) if ignored: - _logger.debug('%s (%s) not deleted because not dest format', label, " ".join(list(ignored))) + _logger.debug('%s (%s) not deleted because not dest format', label, list(ignored)) builds = self.browse(dest_by_builds_ids) existing = builds.exists() remaining = (builds - existing) if remaining: dest_list = [dest for sublist in 
[dest_by_builds_ids[rem_id] for rem_id in remaining.ids] for dest in sublist] - _logger.debug('(%s) (%s) not deleted because no corresponding build found' % (label, " ".join(dest_list))) + _logger.debug('(%s) (%s) not deleted because no corresponding build found', label, " ".join(dest_list)) for build in existing: if fields.Datetime.from_string(build.gc_date) < datetime.datetime.now(): if build.local_state == 'done': for db in dest_by_builds_ids[build.id]: yield db elif build.local_state != 'running': - _logger.warning('db (%s) not deleted because state is not done' % " ".join(dest_by_builds_ids[build.id])) + _logger.warning('db (%s) not deleted because state is not done', " ".join(dest_by_builds_ids[build.id])) def _local_cleanup(self, force=False): """ @@ -578,7 +510,7 @@ class runbot_build(models.Model): self._logger('Removing database') self._local_pg_dropdb(db) - root = self.env['runbot.repo']._root() + root = self.env['runbot.runbot']._root() builds_dir = os.path.join(root, 'build') if force is True: @@ -642,11 +574,10 @@ class runbot_build(models.Model): build.write(values) if not build.active_step: build._log('_schedule', 'No job in config, doing nothing') + build.local_result = 'warn' continue try: - build._log('_schedule', 'Init build environment with config %s ' % build.config_id.name) - # notify pending build - avoid confusing users by saying nothing - build._github_status() + build._log('_schedule', 'Init build environment with config %s ' % build.params_id.config_id.name) os.makedirs(build._path('logs'), exist_ok=True) except Exception: _logger.exception('Failed initiating build %s', build.dest) @@ -685,8 +616,8 @@ class runbot_build(models.Model): 'port': port, }) build._log('wake_up', '**Waking up build**', log_type='markdown', level='SEPARATOR') - self.env['runbot.build.config.step']._run_odoo_run(build, log_path) - # reload_nginx will be triggered by _run_odoo_run + self.env['runbot.build.config.step']._run_run_odoo(build, log_path, force=True) + # reload_nginx will be triggered by _run_run_odoo except Exception: _logger.exception('Failed to wake up build %s', build.dest) build._log('_schedule', 'Failed waking up build', level='ERROR') @@ -703,7 +634,7 @@ class runbot_build(models.Model): # failfast in case of docker error (triggered in database) if build.triggered_result and not build.active_step.ignore_triggered_result: worst_result = self._get_worst_result([build.triggered_result, build.local_result]) - if worst_result != build.local_result: + if worst_result != build.local_result: build.local_result = build.triggered_result build._github_status() # failfast # check if current job is finished @@ -753,10 +684,10 @@ class runbot_build(models.Model): build.write(build_values) if ending_build: - build._github_status() if not build.local_result: # Set 'ok' result if no result set (no tests job on build) build.local_result = 'ok' build._logger("No result set, setting ok by default") + build._github_status() build._run_job() def _run_job(self): @@ -781,7 +712,7 @@ class runbot_build(models.Model): """Return the repo build path""" self.ensure_one() build = self - root = self.env['runbot.repo']._root() + root = self.env['runbot.runbot']._root() return os.path.join(root, 'build', build.dest, *l) def http_log_url(self): @@ -795,27 +726,14 @@ class runbot_build(models.Model): return commit._source_path('odoo', *path) return commit._source_path('openerp', *path) - def _get_available_modules(self, commit): - for manifest_file_name in commit.repo.manifest_files.split(','): # 
'__manifest__.py' '__openerp__.py' - for addons_path in (commit.repo.addons_paths or '').split(','): # '' 'addons' 'odoo/addons' - sep = os.path.join(addons_path, '*') - for manifest_path in glob.glob(commit._source_path(sep, manifest_file_name)): - module = os.path.basename(os.path.dirname(manifest_path)) - yield (addons_path, module, manifest_file_name) - def _docker_source_folder(self, commit): - # in case some build have commits with the same repo name (ex: foo/bar, foo-ent/bar) - # it can be usefull to uniquify commit export path using hash - if self.commit_path_mode == 'rep_sha': - return '%s-%s' % (commit.repo._get_repo_name_part(), commit.sha[:8]) - else: - return commit.repo._get_repo_name_part() + return commit.repo_id.name - def _checkout(self, commits=None): + def _checkout(self): self.ensure_one() # will raise exception if hash not found, we don't want to fail for all build. # checkout branch exports = {} - for commit in commits or self._get_all_commit(): + for commit in self.env.context.get('defined_commit_ids') or self.params_id.commit_ids: build_export_path = self._docker_source_folder(commit) if build_export_path in exports: self._log('_checkout', 'Multiple repo have same export path in build, some source may be missing for %s' % build_export_path, level='ERROR') @@ -823,13 +741,11 @@ class runbot_build(models.Model): exports[build_export_path] = commit.export() return exports - def _get_repo_available_modules(self, commits=None): - available_modules = [] - repo_modules = [] - for commit in commits or self._get_all_commit(): - for (addons_path, module, manifest_file_name) in self._get_available_modules(commit): - if commit.repo == self.repo_id: - repo_modules.append(module) + def _get_available_modules(self): + available_modules = defaultdict(list) + # repo_modules = [] + for commit in self.env.context.get('defined_commit_ids') or self.params_id.commit_ids: + for (addons_path, module, manifest_file_name) in commit._get_available_modules(): if module in available_modules: self._log( 'Building environment', @@ -837,33 +753,33 @@ class runbot_build(models.Model): level='WARNING' ) else: - available_modules.append(module) - return repo_modules, available_modules + available_modules[commit.repo_id].append(module) + # return repo_modules, available_modules + return available_modules - def _get_modules_to_test(self, commits=None, modules_patterns=''): - self.ensure_one() # will raise exception if hash not found, we don't want to fail for all build. 
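The reworked _get_modules_to_test just below filters module names with fnmatch include/exclude patterns taken from each repo and from the build params; a standalone sketch of that filtering (module names and pattern strings are examples only):

import fnmatch

def filter_patterns(patterns, default, all_modules):
    # same +/- pattern semantics as the helper defined in _get_modules_to_test
    selected = set(default)
    for pat in [p.strip() for p in (patterns or '').split(',')]:
        if pat.startswith('-'):
            pat = pat.strip('- ')
            selected -= {m for m in selected if fnmatch.fnmatch(m, pat)}
        elif pat:
            selected |= {m for m in all_modules if fnmatch.fnmatch(m, pat)}
    return selected

available = ['account', 'l10n_be', 'l10n_fr', 'point_of_sale']
assert filter_patterns('-l10n_*', available, available) == {'account', 'point_of_sale'}
assert filter_patterns('l10n_be', ['account'], available) == {'account', 'l10n_be'}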
+ def _get_modules_to_test(self, modules_patterns=''): + self.ensure_one() - # checkout branch - repo_modules, available_modules = self._get_repo_available_modules(commits=commits) + def filter_patterns(patterns, default, all): + default = set(default) + patterns_list = (patterns or '').split(',') + patterns_list = [p.strip() for p in patterns_list] + for pat in patterns_list: + if pat.startswith('-'): + pat = pat.strip('- ') + default -= {mod for mod in default if fnmatch.fnmatch(mod, pat)} + elif pat: + default |= {mod for mod in all if fnmatch.fnmatch(mod, pat)} + return default - patterns_list = [] - for pats in [self.repo_id.modules, self.branch_id.modules, modules_patterns]: - patterns_list += [p.strip() for p in (pats or '').split(',')] + available_modules = [] + modules_to_install = set() + for repo, module_list in self._get_available_modules().items(): + available_modules += module_list + modules_to_install |= filter_patterns(repo.modules, module_list, module_list) - if self.repo_id.modules_auto == 'all': - default_repo_modules = available_modules - elif self.repo_id.modules_auto == 'repo': - default_repo_modules = repo_modules - else: - default_repo_modules = [] - - modules_to_install = set(default_repo_modules) - for pat in patterns_list: - if pat.startswith('-'): - pat = pat.strip('- ') - modules_to_install -= {mod for mod in modules_to_install if fnmatch.fnmatch(mod, pat)} - else: - modules_to_install |= {mod for mod in available_modules if fnmatch.fnmatch(mod, pat)} + modules_to_install = filter_patterns(self.params_id.modules, modules_to_install, available_modules) + modules_to_install = filter_patterns(modules_patterns, modules_to_install, available_modules) return sorted(modules_to_install) @@ -882,11 +798,12 @@ class runbot_build(models.Model): def _local_pg_createdb(self, dbname): icp = self.env['ir.config_parameter'] - db_template = icp.get_param('runbot.runbot_db_template', default='template1') + db_template = icp.get_param('runbot.runbot_db_template', default='template0') self._local_pg_dropdb(dbname) _logger.debug("createdb %s", dbname) with local_pgadmin_cursor() as local_cr: local_cr.execute(sql.SQL("""CREATE DATABASE {} TEMPLATE %s LC_COLLATE 'C' ENCODING 'unicode'""").format(sql.Identifier(dbname)), (db_template,)) + self.env['runbot.database'].create({'name': dbname, 'build_id': self.id}) def _log(self, func, message, level='INFO', log_type='runbot', path='runbot'): self.ensure_one() @@ -909,7 +826,7 @@ class runbot_build(models.Model): continue build._log('kill', 'Kill build %s' % build.dest) docker_stop(build._get_docker_name(), build._path()) - v = {'local_state': 'done', 'requested_action': False, 'active_step': False, 'duplicate_id': False, 'job_end': now()} # what if duplicate? state done? 
+ v = {'local_state': 'done', 'requested_action': False, 'active_step': False, 'job_end': now()} if not build.build_end: v['build_end'] = now() if result: @@ -920,6 +837,8 @@ class runbot_build(models.Model): self.invalidate_cache() def _ask_kill(self, lock=True, message=None): + # if build remains in same bundle, it's ok like that + # if build can be cross bundle, need to check number of ref to build if lock: self.env.cr.execute("""SELECT id FROM runbot_build WHERE parent_path like %s FOR UPDATE""", ['%s%%' % self.parent_path]) self.ensure_one() @@ -928,56 +847,45 @@ class runbot_build(models.Model): build = self message = message or 'Killing build %s, requested by %s (user #%s)' % (build.dest, user.name, uid) build._log('_ask_kill', message) - if build.duplicate_id: - if self.branch_id.pull_branch_name == self.duplicate_id.branch_id.pull_branch_name: - build = build.duplicate_id - else: - build._skip() - return if build.local_state == 'pending': build._skip() elif build.local_state in ['testing', 'running']: build.requested_action = 'deathrow' - for child in build.children_ids: # should we filter build that are target of a duplicate_id? - if not child.duplicate_id: - child._ask_kill(lock=False) + for child in build.children_ids: + child._ask_kill(lock=False) def _wake_up(self): - build = self.real_build - if build.local_state != 'done': - build._log('wake_up', 'Impossibe to wake up, state is not done') + if self.local_state != 'done': + self._log('wake_up', 'Impossibe to wake up, state is not done') else: - build.requested_action = 'wake_up' + self.requested_action = 'wake_up' - def _get_all_commit(self): - return [Commit(self.repo_id, self.name)] + [Commit(dep._get_repo(), dep.dependency_hash) for dep in self.dependency_ids] - - def _get_server_commit(self, commits=None): + def _get_server_commit(self): """ - returns a Commit() of the first repo containing server files found in commits or in build commits + returns a commit of the first repo containing server files found in commits or in build commits the commits param is not used in code base but could be usefull for jobs and crons """ - for commit in (commits or self._get_all_commit()): - if commit.repo.server_files: + for commit in (self.env.context.get('defined_commit_ids') or self.params_id.commit_ids): + if commit.repo_id.server_files: return commit raise ValidationError('No repo found with defined server_files') - def _get_addons_path(self, commits=None): - for commit in (commits or self._get_all_commit()): + def _get_addons_path(self): + for commit in (self.env.context.get('defined_commit_ids') or self.params_id.commit_ids): + if not commit.repo_id.manifest_files: + continue # skip repo without addons source_path = self._docker_source_folder(commit) - for addons_path in (commit.repo.addons_paths or '').split(','): + for addons_path in (commit.repo_id.addons_paths or '').split(','): if os.path.isdir(commit._source_path(addons_path)): yield os.path.join(source_path, addons_path).strip(os.sep) def _get_server_info(self, commit=None): - server_dir = False - server = False commit = commit or self._get_server_commit() - for server_file in commit.repo.server_files.split(','): + for server_file in commit.repo_id.server_files.split(','): if os.path.isfile(commit._source_path(server_file)): return (commit, server_file) - _logger.error('None of %s found in commit, actual commit content:\n %s' % (commit.repo.server_files, os.listdir(commit._source_path()))) - raise RunbotException('No server found in %s' % commit) + _logger.error('None of 
%s found in commit, actual commit content:\n %s' % (commit.repo_id.server_files, os.listdir(commit._source_path()))) + raise RunbotException('No server found in %s' % commit.dname) def _cmd(self, python_params=None, py_version=None, local_only=True, sub_command=None): """Return a list describing the command to start the build @@ -987,9 +895,9 @@ class runbot_build(models.Model): python_params = python_params or [] py_version = py_version if py_version is not None else build._get_py_version() pres = [] - for commit in self._get_all_commit(): - if os.path.isfile(commit._source_path('requirements.txt')): - repo_dir = self._docker_source_folder(commit) + for commit_id in self.env.context.get('defined_commit_ids') or self.params_id.commit_ids: + if os.path.isfile(commit_id._source_path('requirements.txt')): # this is a change I think + repo_dir = self._docker_source_folder(commit_id) requirement_path = os.path.join(repo_dir, 'requirements.txt') pres.append(['sudo', 'pip%s' % py_version, 'install', '-r', '%s' % requirement_path]) @@ -1009,7 +917,7 @@ class runbot_build(models.Model): if grep(config_path, "no-netrpc"): cmd.append("--no-netrpc") - command = Command(pres, cmd, []) + command = Command(pres, cmd, [], cmd_checker=build) # use the username of the runbot host to connect to the databases command.add_config_tuple('db_user', '%s' % pwd.getpwuid(os.getuid()).pw_name) @@ -1037,72 +945,23 @@ class runbot_build(models.Model): return command - def _github_status_notify_all(self, status): - """Notify each repo with a status""" - self.ensure_one() - if self.config_id.update_github_state: - repos_ids = {b.repo_id.id for b in self.search([('name', '=', self.name)])} - build_name = self.name - user_id = self.env.user.id - _dbname = self.env.cr.dbname - _context = self.env.context - build_id = self.id - def send_github_status(): - try: - db_registry = registry(_dbname) - with api.Environment.manage(), db_registry.cursor() as cr: - env = api.Environment(cr, user_id, _context) - repos = env['runbot.repo'].browse(repos_ids) - for repo in repos: - _logger.debug( - "github updating %s status %s to %s in repo %s", - status['context'], build_name, status['state'], repo.name) - repo._github('/repos/:owner/:repo/statuses/%s' % build_name, status, ignore_errors=True) - except: - _logger.exception('Something went wrong sending notification for %s', build_id) - self._cr.after('commit', send_github_status) - - - def _github_status(self): - """Notify github of failed/successful builds""" - for build in self: - if build.parent_id: - if build.orphan_result: - _logger.debug('Skipping result for orphan build %s', self.id) - else: - build.parent_id._github_status() - elif build.config_id.update_github_state: - runbot_domain = self.env['runbot.repo']._domain() - desc = "runbot build %s" % (build.dest,) - - if build.global_result in ('ko', 'warn'): - state = 'failure' - elif build.global_state == 'testing': - state = 'pending' - elif build.global_state in ('running', 'done'): - state = 'error' - if build.global_result == 'ok': - state = 'success' - else: - _logger.debug("skipping github status for build %s ", build.id) - continue - desc += " (runtime %ss)" % (build.job_time,) - - status = { - "state": state, - "target_url": "http://%s/runbot/build/%s" % (runbot_domain, build.id), - "description": desc, - "context": "ci/runbot" - } - if self.last_github_state != state: - build._github_status_notify_all(status) - self.last_github_state = state - else: - _logger.debug('Skipping unchanged status for %s', self.id) + def 
_cmd_check(self, cmd): + """ + Check the cmd right before creating the build command line executed in + a Docker container. If a database creation is found in the cmd, a + 'runbot.database' is created. + This method is intended to be called from cmd itself + """ + if '-d' in cmd: + dbname = cmd[cmd.index('-d') + 1] + self.env['runbot.database'].create({ + 'name': dbname, + 'build_id': self.id + }) def _next_job_values(self): self.ensure_one() - step_ids = self.config_id.step_ids() + step_ids = self.params_id.config_id.step_ids() if not step_ids: # no job to do, build is done return {'active_step': False, 'local_state': 'done'} @@ -1111,14 +970,21 @@ class runbot_build(models.Model): return {'active_step': False, 'local_state': 'done'} next_index = step_ids.index(self.active_step) + 1 if self.active_step else 0 - if next_index >= len(step_ids): # final job, build is done - return {'active_step': False, 'local_state': 'done'} - new_step = step_ids[next_index] # job to do, state is job_state (testing or running) + while True: + if next_index >= len(step_ids): # final job, build is done + return {'active_step': False, 'local_state': 'done'} + new_step = step_ids[next_index] # job to do, state is job_state (testing or running) + if new_step.domain_filter and not self.filtered_domain(safe_eval(new_step.domain_filter)): + + self._log('run', '**Skipping** step ~~%s~~ from config **%s**' % (new_step.name, self.params_id.config_id.name), log_type='markdown', level='SEPARATOR') + next_index += 1 + continue + break return {'active_step': new_step.id, 'local_state': new_step._step_state()} def _get_py_version(self): - """return the python name to use from build instance""" + """return the python name to use from build batch""" (server_commit, server_file) = self._get_server_info() server_path = server_commit._source_path(server_file) with open(server_path, 'r') as f: @@ -1134,6 +1000,10 @@ class runbot_build(models.Model): ir_logs = self.env['ir.logging'].search([('level', '=', 'ERROR'), ('type', '=', 'server'), ('build_id', 'in', builds_to_scan.ids)]) BuildError._parse_logs(ir_logs) + def is_file(self, file, mode='r'): + file_path = self._path(file) + return os.path.exists(file_path) + def read_file(self, file, mode='r'): file_path = self._path(file) try: @@ -1175,5 +1045,52 @@ class runbot_build(models.Model): def get_formated_build_age(self): return s2human(self.build_age) - def sorted_revdep_build_ids(self): - return sorted(self.revdep_build_ids, key=lambda build: build.repo_id.name) + def get_color_class(self): + + if self.global_result == 'ko': + return 'danger' + if self.global_result == 'warn': + return 'warning' + + if self.global_state == 'pending': + return 'default' + if self.global_state in ('testing', 'waiting'): + return 'info' + + if self.global_result == 'ok': + return 'success' + + if self.global_result in ('skipped', 'killed', 'manually_killed'): + return 'killed' + + def _github_status(self, post_commit=True): + """Notify github of failed/successful builds""" + for build in self: + # TODO maybe avoid to send status if build is killable (another new build exist and will send the status) + if build.parent_id: + if build.orphan_result: + _logger.debug('Skipping result for orphan build %s', self.id) + else: + build.parent_id._github_status(post_commit) + elif build.params_id.config_id == build.params_id.trigger_id.config_id: + if build.global_result in ('ko', 'warn'): + state = 'failure' + elif build.global_state in ('pending', 'testing'): + state = 'pending' + elif build.global_state 
in ('running', 'done'): + state = 'error' + if build.global_result == 'ok': + state = 'success' + else: + _logger.debug("skipping github status for build %s ", build.id) + continue + + runbot_domain = self.env['runbot.runbot']._domain() + trigger = self.params_id.trigger_id + target_url = trigger.ci_url or "http://%s/runbot/build/%s" % (runbot_domain, build.id) + desc = trigger.ci_description or " (runtime %ss)" % (build.job_time,) + if trigger.ci_context: + for build_commit in self.params_id.commit_link_ids: + commit = build_commit.commit_id + if build_commit.match_type != 'default' and commit.repo_id in trigger.repo_ids: + commit._github_status(build, trigger.ci_context, state, target_url, desc, post_commit) diff --git a/runbot/models/build_config.py b/runbot/models/build_config.py index b287abbc..5ea2a068 100644 --- a/runbot/models/build_config.py +++ b/runbot/models/build_config.py @@ -1,15 +1,15 @@ import base64 import glob import logging +import fnmatch import re import shlex import time -from ..common import now, grep, time2str, rfind, Commit, s2human, os +from ..common import now, grep, time2str, rfind, s2human, os, RunbotException from ..container import docker_run, docker_get_gateway_ip, Command from odoo import models, fields, api from odoo.exceptions import UserError, ValidationError from odoo.tools.safe_eval import safe_eval, test_python_expr -from odoo.addons.runbot.models.repo import RunbotException _logger = logging.getLogger(__name__) @@ -20,18 +20,17 @@ PYTHON_DEFAULT = "# type python code here\n\n\n\n\n\n" class Config(models.Model): - _name = "runbot.build.config" + _name = 'runbot.build.config' _description = "Build config" _inherit = "mail.thread" - name = fields.Char('Config name', required=True, unique=True, track_visibility='onchange', help="Unique name for config please use trigram as postfix for custom configs") + name = fields.Char('Config name', required=True, tracking=True, help="Unique name for config please use trigram as postfix for custom configs") + description = fields.Char('Config description') step_order_ids = fields.One2many('runbot.build.config.step.order', 'config_id', copy=True) - update_github_state = fields.Boolean('Notify build state to github', default=False, track_visibility='onchange') - protected = fields.Boolean('Protected', default=False, track_visibility='onchange') + protected = fields.Boolean('Protected', default=False, tracking=True) group = fields.Many2one('runbot.build.config', 'Configuration group', help="Group of config's and config steps") group_name = fields.Char('Group name', related='group.name') - monitoring_view_id = fields.Many2one('ir.ui.view', 'Monitoring view') @api.model_create_single def create(self, values): @@ -70,7 +69,7 @@ class Config(models.Model): raise UserError('Jobs of type run_odoo should be preceded by a job of type install_odoo') self._check_recustion() - def _check_recustion(self, visited=None): # todo test + def _check_recustion(self, visited=None): visited = visited or [] recursion = False if self in visited: @@ -84,52 +83,88 @@ class Config(models.Model): create_config._check_recustion(visited[:]) +class ConfigStepUpgradeDb(models.Model): + _name = 'runbot.config.step.upgrade.db' + _description = "Config Step Upgrade Db" + + step_id = fields.Many2one('runbot.build.config.step', 'Step') + config_id = fields.Many2one('runbot.build.config', 'Config') + db_pattern = fields.Char('Db suffix pattern') + min_target_version_id = fields.Many2one('runbot.version', "Minimal target version_id") + + class 
ConfigStep(models.Model): _name = 'runbot.build.config.step' _description = "Config step" _inherit = 'mail.thread' # general info - name = fields.Char('Step name', required=True, unique=True, track_visibility='onchange', help="Unique name for step please use trigram as postfix for custom step_ids") + name = fields.Char('Step name', required=True, unique=True, tracking=True, help="Unique name for step please use trigram as postfix for custom step_ids") + domain_filter = fields.Char('Domain filter', tracking=True) job_type = fields.Selection([ ('install_odoo', 'Test odoo'), ('run_odoo', 'Run odoo'), ('python', 'Python code'), ('create_build', 'Create build'), - ], default='install_odoo', required=True, track_visibility='onchange') - protected = fields.Boolean('Protected', default=False, track_visibility='onchange') - default_sequence = fields.Integer('Sequence', default=100, track_visibility='onchange') # or run after? # or in many2many rel? + ('configure_upgrade', 'Configure Upgrade'), + ('configure_upgrade_complement', 'Configure Upgrade Complement'), + ('test_upgrade', 'Test Upgrade'), + ('restore', 'Restore') + ], default='install_odoo', required=True, tracking=True) + protected = fields.Boolean('Protected', default=False, tracking=True) + default_sequence = fields.Integer('Sequence', default=100, tracking=True) # or run after? # or in many2many rel? step_order_ids = fields.One2many('runbot.build.config.step.order', 'step_id') group = fields.Many2one('runbot.build.config', 'Configuration group', help="Group of config's and config steps") group_name = fields.Char('Group name', related='group.name') make_stats = fields.Boolean('Make stats', default=False) build_stat_regex_ids = fields.Many2many('runbot.build.stat.regex', string='Stats Regexes') # install_odoo - create_db = fields.Boolean('Create Db', default=True, track_visibility='onchange') # future - custom_db_name = fields.Char('Custom Db Name', track_visibility='onchange') # future + create_db = fields.Boolean('Create Db', default=True, tracking=True) # future + custom_db_name = fields.Char('Custom Db Name', tracking=True) # future install_modules = fields.Char('Modules to install', help="List of module patterns to install, use * to install all available modules, prefix the pattern with dash to remove the module.", default='') - db_name = fields.Char('Db Name', compute='_compute_db_name', inverse='_inverse_db_name', track_visibility='onchange') - cpu_limit = fields.Integer('Cpu limit', default=3600, track_visibility='onchange') - coverage = fields.Boolean('Coverage', default=False, track_visibility='onchange') - flamegraph = fields.Boolean('Allow Flamegraph', default=False, track_visibility='onchange') - test_enable = fields.Boolean('Test enable', default=True, track_visibility='onchange') - test_tags = fields.Char('Test tags', help="comma separated list of test tags", track_visibility='onchange') - enable_auto_tags = fields.Boolean('Allow auto tag', default=False, track_visibility='onchange') - sub_command = fields.Char('Subcommand', track_visibility='onchange') - extra_params = fields.Char('Extra cmd args', track_visibility='onchange') - additionnal_env = fields.Char('Extra env', help='Example: foo="bar",bar="foo". 
Cannot contains \' ', track_visibility='onchange') + db_name = fields.Char('Db Name', compute='_compute_db_name', inverse='_inverse_db_name', tracking=True) + cpu_limit = fields.Integer('Cpu limit', default=3600, tracking=True) + coverage = fields.Boolean('Coverage', default=False, tracking=True) + flamegraph = fields.Boolean('Allow Flamegraph', default=False, tracking=True) + test_enable = fields.Boolean('Test enable', default=True, tracking=True) + test_tags = fields.Char('Test tags', help="comma separated list of test tags", tracking=True) + enable_auto_tags = fields.Boolean('Allow auto tag', default=False, tracking=True) + sub_command = fields.Char('Subcommand', tracking=True) + extra_params = fields.Char('Extra cmd args', tracking=True) + additionnal_env = fields.Char('Extra env', help='Example: foo="bar",bar="foo". Cannot contains \' ', tracking=True) # python - python_code = fields.Text('Python code', track_visibility='onchange', default=PYTHON_DEFAULT) - python_result_code = fields.Text('Python code for result', track_visibility='onchange', default=PYTHON_DEFAULT) - ignore_triggered_result = fields.Boolean('Ignore error triggered in logs', track_visibility='onchange', default=False) + python_code = fields.Text('Python code', tracking=True, default=PYTHON_DEFAULT) + python_result_code = fields.Text('Python code for result', tracking=True, default=PYTHON_DEFAULT) + ignore_triggered_result = fields.Boolean('Ignore error triggered in logs', tracking=True, default=False) running_job = fields.Boolean('Job final state is running', default=False, help="Docker won't be killed if checked") # create_build - create_config_ids = fields.Many2many('runbot.build.config', 'runbot_build_config_step_ids_create_config_ids_rel', string='New Build Configs', track_visibility='onchange', index=True) - number_builds = fields.Integer('Number of build to create', default=1, track_visibility='onchange') - hide_build = fields.Boolean('Hide created build in frontend', default=True, track_visibility='onchange') - force_build = fields.Boolean("As a forced rebuild, don't use duplicate detection", default=False, track_visibility='onchange') - force_host = fields.Boolean('Use same host as parent for children', default=False, track_visibility='onchange') # future - make_orphan = fields.Boolean('No effect on the parent result', help='Created build result will not affect parent build result', default=False, track_visibility='onchange') + create_config_ids = fields.Many2many('runbot.build.config', 'runbot_build_config_step_ids_create_config_ids_rel', string='New Build Configs', tracking=True, index=True) + number_builds = fields.Integer('Number of build to create', default=1, tracking=True) + + force_host = fields.Boolean('Use same host as parent for children', default=False, tracking=True) # future + make_orphan = fields.Boolean('No effect on the parent result', help='Created build result will not affect parent build result', default=False, tracking=True) + + # upgrade + # 1. define target + upgrade_to_master = fields.Boolean() # upgrade niglty + (future migration? 
no, need last master, not nightly master) + upgrade_to_current = fields.Boolean(help="If checked, only upgrade to current will be used, other options will be ignored") + upgrade_to_major_versions = fields.Boolean() # upgrade (no master) + upgrade_to_all_versions = fields.Boolean() # upgrade niglty (no master) + upgrade_to_version_ids = fields.Many2many('runbot.version', relation='runbot_upgrade_to_version_ids', string='Forced version to use as target') + # 2. define source from target + #upgrade_from_current = fields.Boolean() #usefull for future migration (13.0-dev/13.3-dev -> master) AVOID TO USE THAT + upgrade_from_previous_major_version = fields.Boolean() # 13.0 + upgrade_from_last_intermediate_version = fields.Boolean() # 13.3 + upgrade_from_all_intermediate_version = fields.Boolean() # 13.2 # 13.1 + upgrade_from_version_ids = fields.Many2many('runbot.version', relation='runbot_upgrade_from_version_ids', string='Forced version to use as source (cartesian with target)') + + upgrade_flat = fields.Boolean("Flat", help="Take all decisions in on build") + + upgrade_config_id = fields.Many2one('runbot.build.config',string='Upgrade Config', tracking=True, index=True) + upgrade_dbs = fields.One2many('runbot.config.step.upgrade.db', 'step_id', tracking=True) + + restore_download_db_suffix = fields.Char('Download db suffix') + restore_rename_db_suffix = fields.Char('Rename db suffix') @api.constrains('python_code') def _check_python_code(self): @@ -145,13 +180,6 @@ class ConfigStep(models.Model): if msg: raise ValidationError(msg) - @api.onchange('number_builds') - def _onchange_number_builds(self): - if self.number_builds > 1: - self.force_build = True - else: - self.force_build = False - @api.onchange('sub_command') def _onchange_number_builds(self): if self.sub_command: @@ -207,25 +235,15 @@ class ConfigStep(models.Model): def _run(self, build): log_path = build._path('logs', '%s.txt' % self.name) build.write({'job_start': now(), 'job_end': False}) # state, ... 
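# A minimal standalone sketch of the "_run_%s" % job_type dispatch that
# _run_step switches to below (replacing the old if/elif chain), assuming
# plain Python outside Odoo; the step class, the dummy build value and the two
# job types shown are illustrative assumptions only.
class ConfigStepSketch:
    def __init__(self, job_type):
        self.job_type = job_type

    def _run_step(self, build, log_path):
        # each job_type resolves to a method named _run_<job_type>,
        # so adding a step type only requires adding a matching method
        run_method = getattr(self, '_run_%s' % self.job_type)
        return run_method(build, log_path)

    def _run_install_odoo(self, build, log_path):
        return 'install on %s' % build

    def _run_restore(self, build, log_path):
        return 'restore on %s' % build

print(ConfigStepSketch('restore')._run_step('123456-13-0-abcdef', '/tmp/log.txt'))
# -> 'restore on 123456-13-0-abcdef'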
- build._log('run', 'Starting step **%s** from config **%s**' % (self.name, build.config_id.name), log_type='markdown', level='SEPARATOR') + build._log('run', 'Starting step **%s** from config **%s**' % (self.name, build.params_id.config_id.name), log_type='markdown', level='SEPARATOR') return self._run_step(build, log_path) def _run_step(self, build, log_path): build.log_counter = self.env['ir.config_parameter'].sudo().get_param('runbot.runbot_maxlogs', 100) - if self.job_type == 'run_odoo': - return self._run_odoo_run(build, log_path) - if self.job_type == 'install_odoo': - return self._run_odoo_install(build, log_path) - elif self.job_type == 'python': - return self._run_python(build, log_path) - elif self.job_type == 'create_build': - return self._create_build(build, log_path) - - def _create_build(self, build, log_path): - Build = self.env['runbot.build'] - if self.force_build: - Build = Build.with_context(force_rebuild=True) + run_method = getattr(self, '_run_%s' % self.job_type) + return run_method(build, log_path) + def _run_create_build(self, build, log_path): count = 0 for create_config in self.create_config_ids: for _ in range(self.number_builds): @@ -233,23 +251,8 @@ class ConfigStep(models.Model): if count > 200: build._logger('Too much build created') break - children = Build.create({ - 'dependency_ids': build._copy_dependency_ids(), - 'config_id': create_config.id, - 'parent_id': build.id, - 'branch_id': build.branch_id.id, - 'name': build.name, - 'build_type': build.build_type, - 'date': build.date, - 'author': build.author, - 'author_email': build.author_email, - 'committer': build.committer, - 'committer_email': build.committer_email, - 'subject': build.subject, - 'hidden': self.hide_build, - 'orphan_result': self.make_orphan, - }) - build._log('create_build', 'created with config %s' % create_config.name, log_type='subbuild', path=str(children.id)) + child = build._add_child({'config_id': create_config.id}, orphan=self.make_orphan) + build._log('create_build', 'created with config %s' % create_config.name, log_type='subbuild', path=str(child.id)) def make_python_ctx(self, build): return { @@ -262,14 +265,14 @@ class ConfigStep(models.Model): 'log_path': build._path('logs', '%s.txt' % self.name), 'glob': glob.glob, 'Command': Command, - 'Commit': Commit, 'base64': base64, 're': re, 'time': time, 'grep': grep, 'rfind': rfind, } - def _run_python(self, build, log_path): # TODO rework log_path after checking python steps, compute on build + + def _run_python(self, build, log_path): eval_ctx = self.make_python_ctx(build) try: safe_eval(self.python_code.strip(), eval_ctx, mode="exec", nocopy=True) @@ -283,14 +286,21 @@ class ConfigStep(models.Model): else: raise - def _is_docker_step(self): if not self: return False self.ensure_one() - return self.job_type in ('install_odoo', 'run_odoo') or (self.job_type == 'python' and 'docker_run(' in self.python_code) + return self.job_type in ('install_odoo', 'run_odoo', 'restore', 'test_upgrade') or (self.job_type == 'python' and ('docker_run(' in self.python_code or '_run_install_odoo(' in self.python_code)) + + def _run_run_odoo(self, build, log_path, force=False): + if not force: + if build.parent_id: + build._log('_run_run_odoo', 'build has a parent, skip run') + return + if build.no_auto_run: + build._log('_run_run_odoo', 'build auto run is disabled, skip run') + return - def _run_odoo_run(self, build, log_path): exports = build._checkout() # update job_start AFTER checkout to avoid build being killed too soon if checkout took some 
time and docker take some time to start build.job_start = now() @@ -307,15 +317,17 @@ class ConfigStep(models.Model): # not sure, to avoid old server to check other dbs cmd += ["--max-cron-threads", "0"] - db_name = build.config_data.get('db_name') or [step.db_name for step in build.config_id.step_ids() if step.job_type == 'install_odoo'][-1] + db_name = build.params_id.config_data.get('db_name') or [step.db_name for step in build.params_id.config_id.step_ids() if step.job_type == 'install_odoo'][-1] # we need to have at least one job of type install_odoo to run odoo, take the last one for db_name. cmd += ['-d', '%s-%s' % (build.dest, db_name)] - if grep(build._server("tools/config.py"), "proxy-mode") and build.repo_id.nginx: + icp = self.env['ir.config_parameter'].sudo() + nginx = icp.get_param('runbot.runbot_nginx', True) + if grep(build._server("tools/config.py"), "proxy-mode") and nginx: cmd += ["--proxy-mode"] if grep(build._server("tools/config.py"), "db-filter"): - if build.repo_id.nginx: + if nginx: cmd += ['--db-filter', '%d.*$'] else: cmd += ['--db-filter', '%s.*$' % build.dest] @@ -329,10 +341,10 @@ class ConfigStep(models.Model): self.env.cr.commit() # commit before docker run to be 100% sure that db state is consistent with dockers self.invalidate_cache() res = docker_run(cmd, log_path, build_path, docker_name, exposed_ports=[build_port, build_port + 1], ro_volumes=exports) - build.repo_id._reload_nginx() + self.env['runbot.runbot']._reload_nginx() return res - def _run_odoo_install(self, build, log_path): + def _run_install_odoo(self, build, log_path): exports = build._checkout() # update job_start AFTER checkout to avoid build being killed too soon if checkout took some time and docker take some time to start build.job_start = now() @@ -349,13 +361,13 @@ class ConfigStep(models.Model): python_params = ['-m', 'flamegraph', '-o', self._perfs_data_path()] cmd = build._cmd(python_params, py_version, sub_command=self.sub_command) # create db if needed - db_suffix = build.config_data.get('db_name') or self.db_name - db_name = "%s-%s" % (build.dest, db_suffix) + db_suffix = build.params_id.config_data.get('db_name') or (build.params_id.dump_db.db_suffix if not self.create_db else False) or self.db_name + db_name = '%s-%s' % (build.dest, db_suffix) if self.create_db: build._local_pg_createdb(db_name) cmd += ['-d', db_name] # list module to install - extra_params = build.extra_params or self.extra_params or '' + extra_params = build.params_id.extra_params or self.extra_params or '' if mods and '-i' not in extra_params: cmd += ['-i', mods] config_path = build._server("tools/config.py") @@ -402,7 +414,7 @@ class ConfigStep(models.Model): cmd.finals.append(['pg_dump', db_name, '>', sql_dest]) cmd.finals.append(['cp', '-r', filestore_path, filestore_dest]) cmd.finals.append(['cd', dump_dir, '&&', 'zip', '-rmq9', zip_path, '*']) - infos = '{\n "db_name": "%s",\n "build_id": %s,\n "shas": [%s]\n}' % (db_name, build.id, ', '.join(['"%s"' % commit for commit in build._get_all_commit()])) + infos = '{\n "db_name": "%s",\n "build_id": %s,\n "shas": [%s]\n}' % (db_name, build.id, ', '.join(['"%s"' % build_commit.commit_id.dname for build_commit in build.params_id.commit_link_ids])) build.write_file('logs/%s/info.json' % db_name, infos) if self.flamegraph: @@ -410,9 +422,357 @@ class ConfigStep(models.Model): cmd.finals.append(['gzip', '-f', self._perfs_data_path()]) # keep data but gz them to save disc space max_timeout = int(self.env['ir.config_parameter'].get_param('runbot.runbot_timeout', 
default=10000)) timeout = min(self.cpu_limit, max_timeout) - env_variables = self.additionnal_env.split(',') if self.additionnal_env else [] + env_variables = self.additionnal_env.split(';') if self.additionnal_env else [] return docker_run(cmd, log_path, build._path(), build._get_docker_name(), cpu_limit=timeout, ro_volumes=exports, env_variables=env_variables) + def _upgrade_create_childs(self): + pass + + def _run_configure_upgrade_complement(self, build, *args): + """ + Parameters: + - upgrade_dumps_trigger_id: a configure_upgradestep + + A complement aims to test the exact oposite of an upgrade trigger. + Ignore configs an categories: only focus on versions. + """ + param = build.params_id + version = param.version_id + builds_references = param.builds_reference_ids + builds_references_by_version_id = {b.params_id.version_id.id: b for b in builds_references} + upgrade_complement_step = build.params_id.trigger_id.upgrade_dumps_trigger_id.upgrade_step_id + version_domain = build.params_id.trigger_id.upgrade_dumps_trigger_id.get_version_domain() + valid_targets = build.browse() + next_versions = version.next_major_version_id | version.next_intermediate_version_ids + if version_domain: # filter only on version where trigger is enabled + next_versions = next_versions.filtered_domain(version_domain) + if next_versions: + for next_version in next_versions: + if version in upgrade_complement_step._get_upgrade_source_versions(next_version): + valid_targets |= (builds_references_by_version_id.get(next_version.id) or build.browse()) + + for target in valid_targets: + build._log('', 'Checking upgrade to [%s](%s)' % (target.params_id.version_id.name, target.build_url), log_type='markdown') + for upgrade_db in upgrade_complement_step.upgrade_dbs: + if not upgrade_db.min_target_version_id or upgrade_db.min_target_version_id.number <= target.params_id.version_id.number: + # note: here we don't consider the upgrade_db config here + dbs = build.database_ids.sorted('db_suffix') + for db in self._filter_upgrade_database(dbs, upgrade_db.db_pattern): + child = build._add_child({ + 'upgrade_to_build_id': target.id, + 'upgrade_from_build_id': build, # always current build + 'dump_db': db.id, + 'config_id': upgrade_complement_step.upgrade_config_id + }) + child.description = 'Testing migration from %s to %s using parent db %s' % ( + version.name, + target.params_id.version_id.name, + db.name, + ) + child._log('', 'This build tests change of schema in stable version testing upgrade to %s' % target.params_id.version_id.name) + + def _run_configure_upgrade(self, build, log_path): + """ + Source/target parameters: + - upgrade_to_current | (upgrade_to_master + (upgrade_to_major_versions | upgrade_to_all_versions)) + - upgrade_from_previous_major_version + (upgrade_from_all_intermediate_version | upgrade_from_last_intermediate_version) + - upgrade_dbs + - upgrade_to_version_ids (use instead of upgrade_to flags) + - upgrade_from_version_ids (use instead of upgrade_from flags) + + Other parameters + - upgrade_flat + - upgrade_config_id + + Create subbuilds with parameters defined for a step of type test_upgrade: + - upgrade_to_build_id + - upgrade_from_build_id + - dump_db + - config_id (upgrade_config_id) + + If upgrade_flat is False, a level of child will be create for target, source and dbs + (if there is multiple choices). + If upgrade_flat is True, all combination will be computed locally and only one level of children will be added to caller build. 
+ + Note: + - This step should be alone in a config since this config is recursive + - A typical upgrade_config_id should have a restore step and a test_upgrade step. + """ + assert len(build.parent_path.split('/')) < 6 # small security to avoid recursion loop, 6 is arbitrary + param = build.params_id + end = False + target_builds = False + source_builds_by_target = {} + builds_references = param.builds_reference_ids + builds_references_by_version_id = {b.params_id.version_id.id: b for b in builds_references} + if param.upgrade_to_build_id: + target_builds = param.upgrade_to_build_id + else: + if self.upgrade_to_current: + target_builds = build + else: + target_builds = build.browse() + if self.upgrade_to_version_ids: + for version in self.upgrade_to_version_ids: + target_builds |= builds_references_by_version_id.get(version.id) or build.browse() + else: + master_build = builds_references.filtered(lambda b: b.params_id.version_id.name == 'master') + base_builds = (builds_references - master_build) + if self.upgrade_to_master: + target_builds = master_build + if self.upgrade_to_major_versions: + target_builds |= base_builds.filtered(lambda b: b.params_id.version_id.is_major) + elif self.upgrade_to_all_versions: + target_builds |= base_builds + target_builds = target_builds.sorted(lambda b: b.params_id.version_id.number) + if target_builds: + build._log('', 'Testing upgrade targeting %s' % ', '.join(target_builds.mapped('params_id.version_id.name'))) + if not target_builds: + build._log('_run_configure_upgrade', 'No reference build found with correct target in availables references, skipping. %s' % builds_references.mapped('params_id.version_id.name'), level='ERROR') + end = True + elif len(target_builds) > 1 and not self.upgrade_flat: + for target_build in target_builds: + build._add_child({'upgrade_to_build_id': target_build.id}) + end = True + if end: + return # replace this by a python job friendly solution + + for target_build in target_builds: + if param.upgrade_from_build_id: + source_builds_by_target[target_build] = param.upgrade_from_build_id + else: + target_version = target_build.params_id.version_id + from_builds = self._get_upgrade_source_builds(target_version, builds_references_by_version_id) + source_builds_by_target[target_build] = from_builds + if from_builds: + build._log('', 'Defining source version(s) for %s: %s' % (target_build.params_id.version_id.name, ', '.join(source_builds_by_target[target_build].mapped('params_id.version_id.name')))) + if not from_builds: + build._log('_run_configure_upgrade', 'No source version found for %s, skipping' % target_version.name, level='INFO') + elif not self.upgrade_flat: + for from_build in from_builds: + build._add_child({'upgrade_to_build_id': target_build.id, 'upgrade_from_build_id': from_build.id}) + end = True + + if end: + return # replace this by a python job friendly solution + + assert not param.dump_db + if not self.upgrade_dbs: + build._log('configure_upgrade', 'No upgrade dbs defined in step %s' % self.name, level='WARN') + for target, sources in source_builds_by_target.items(): + for source in sources: + for upgrade_db in self.upgrade_dbs: + if not upgrade_db.min_target_version_id or upgrade_db.min_target_version_id.number <= target.params_id.version_id.number: + config_id = upgrade_db.config_id + dump_builds = build.search([('id', 'child_of', source.id), ('params_id.config_id', '=', config_id.id), ('orphan_result', '=', False)]) + # this search is not optimal + if not dump_builds: + build._log('_run_configure_upgrade', 
'No child build found with config %s in %s' % (config_id.name, source.id), level='ERROR') + dbs = dump_builds.database_ids.sorted('db_suffix') + valid_databases = list(self._filter_upgrade_database(dbs, upgrade_db.db_pattern)) + if not valid_databases: + build._log('_run_configure_upgrade', 'No datase found for pattern %s' % (upgrade_db.db_pattern), level='ERROR') + for db in valid_databases: + #commit_ids = build.params_id.commit_ids + #if commit_ids != target.params_id.commit_ids: + # repo_ids = commit_ids.mapped('repo_id') + # for commit_link in target.params_id.commit_link_ids: + # if commit_link.commit_id.repo_id not in repo_ids: + # additionnal_commit_links |= commit_link + # build._log('', 'Adding sources from build [%s](%s)' % (target.id, target.build_url), log_type='markdown') + + child = build._add_child({ + 'upgrade_to_build_id': target.id, + 'upgrade_from_build_id': source, + 'dump_db': db.id, + 'config_id': self.upgrade_config_id + }) + + child.description = 'Testing migration from %s to %s using db %s (%s)' % ( + source.params_id.version_id.name, + target.params_id.version_id.name, + db.name, + config_id.name + ) + # TODO log somewhere if no db at all is found for a db_suffix + + def _get_upgrade_source_versions(self, target_version): + if self.upgrade_from_version_ids: + return self.upgrade_from_version_ids + else: + versions = self.env['runbot.version'].browse() + if self.upgrade_from_previous_major_version: + versions |= target_version.previous_major_version_id + if self.upgrade_from_all_intermediate_version: + versions |= target_version.intermediate_version_ids + elif self.upgrade_from_last_intermediate_version: + if target_version.intermediate_version_ids: + versions |= target_version.intermediate_version_ids[-1] + return versions + + def _get_upgrade_source_builds(self, target_version, builds_references_by_version_id): + versions = self._get_upgrade_source_versions(target_version) + from_builds = self.env['runbot.build'].browse() + for version in versions: + from_builds |= builds_references_by_version_id.get(version.id) or self.env['runbot.build'].browse() + return from_builds.sorted(lambda b: b.params_id.version_id.number) + + def _filter_upgrade_database(self, dbs, pattern): + pat_list = pattern.split(',') if pattern else [] + for db in dbs: + if any(fnmatch.fnmatch(db.db_suffix, pat) for pat in pat_list): + yield db + + def _run_test_upgrade(self, build, log_path): + target = build.params_id.upgrade_to_build_id + commit_ids = build.params_id.commit_ids + target_commit_ids = target.params_id.commit_ids + if commit_ids != target_commit_ids: + target_repo_ids = target_commit_ids.mapped('repo_id') + for commit in commit_ids: + if commit.repo_id not in target_repo_ids: + target_commit_ids |= commit + build._log('', 'Adding sources from build [%s](%s)' % (target.id, target.build_url), log_type='markdown') + build = build.with_context(defined_commit_ids=target_commit_ids) + exports = build._checkout() + + dump_db = build.params_id.dump_db + + migrate_db_name = '%s-%s' % (build.dest, dump_db.db_suffix) # only ok if restore does not force db_suffix + + migrate_cmd = build._cmd() + migrate_cmd += ['-u all'] + migrate_cmd += ['-d', migrate_db_name] + migrate_cmd += ['--stop-after-init'] + migrate_cmd += ['--max-cron-threads=0'] + # migrate_cmd += ['--upgrades-paths', '/%s' % migration_scripts] upgrades-paths is broken, ln is created automatically in sources + + build._log('run', 'Start migration build %s' % build.dest) + timeout = self.cpu_limit + + 
migrate_cmd.finals.append(['psql', migrate_db_name, '-c', '"SELECT id, name, state FROM ir_module_module WHERE state NOT IN (\'installed\', \'uninstalled\', \'uninstallable\') AND name NOT LIKE \'test_%\' "', '>', '/data/build/logs/modules_states.txt']) + + env_variables = self.additionnal_env.split(';') if self.additionnal_env else [] + exception_env = self.env['runbot.upgrade.exception']._generate() + if exception_env: + env_variables.append(exception_env) + docker_run(migrate_cmd, log_path, build._path(), build._get_docker_name(), cpu_limit=timeout, ro_volumes=exports, env_variables=env_variables) + + def _run_restore(self, build, log_path): + # exports = build._checkout() + params = build.params_id + + if 'dump_url' in params.config_data: + dump_url = params.config_data['dump_url'] + zip_name = dump_url.split('/')[-1] + build._log('test-migration', 'Restoring db [%s](%s)' % (zip_name, dump_url), log_type='markdown') + else: + download_db_suffix = params.dump_db.db_suffix or self.restore_download_db_suffix + dump_build = params.dump_db.build_id or build.parent_id + assert download_db_suffix and dump_build + download_db_name = '%s-%s' % (dump_build.dest, download_db_suffix) + zip_name = '%s.zip' % download_db_name + dump_url = '%s%s' % (dump_build.http_log_url(), zip_name) + build._log('test-migration', 'Restoring dump [%s](%s) from build [%s](%s)' % (zip_name, dump_url, dump_build.id, dump_build.build_url), log_type='markdown') + restore_suffix = self.restore_rename_db_suffix or params.dump_db.db_suffix + assert restore_suffix + restore_db_name = '%s-%s' % (build.dest, restore_suffix) + + build._local_pg_createdb(restore_db_name) + cmd = ' && '.join([ + 'mkdir /data/build/restore', + 'cd /data/build/restore', + 'wget %s' % dump_url, + 'unzip -q %s' % zip_name, + 'echo "### restoring filestore"', + 'mkdir -p /data/build/datadir/filestore/%s' % restore_db_name, + 'mv filestore/* /data/build/datadir/filestore/%s' % restore_db_name, + 'echo "###restoring db"', + 'psql -q %s < dump.sql' % (restore_db_name), + 'cd /data/build', + 'echo "### cleaning"', + 'rm -r restore', + 'echo "### listing modules"', + """psql %s -c "select name from ir_module_module where state = 'installed'" -t -A > /data/build/logs/restore_modules_installed.txt""" % restore_db_name, + + ]) + + docker_run(cmd, log_path, build._path(), build._get_docker_name(), cpu_limit=self.cpu_limit) + + def _reference_builds(self, bundle, trigger): + upgrade_dumps_trigger_id = trigger.upgrade_dumps_trigger_id + refs_batches = self._reference_batches(bundle, trigger) + refs_builds = refs_batches.mapped('slot_ids').filtered( + lambda slot: slot.trigger_id == upgrade_dumps_trigger_id + ).mapped('build_id') + # should we filter on active? implicit. On match type? on skipped ? + # is last_"done"_batch enough? 
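# A small sketch of how the _run_restore step above derives the dump url and
# the restored database name, assuming plain Python; the dest values, suffixes
# and log url below are hypothetical examples, not values taken from the patch.
def restore_names(dump_dest, download_suffix, build_dest, restore_suffix, log_url):
    download_db_name = '%s-%s' % (dump_dest, download_suffix)
    zip_name = '%s.zip' % download_db_name
    dump_url = '%s%s' % (log_url, zip_name)                    # where the zip is fetched from
    restore_db_name = '%s-%s' % (build_dest, restore_suffix)   # db created locally before restore
    return dump_url, restore_db_name

print(restore_names('123456-13-0-abcdef', 'all', '123457-master-fedcba', 'all',
                    'http://host/runbot/static/build/123456-13-0-abcdef/logs/'))
# -> ('http://host/runbot/static/build/123456-13-0-abcdef/logs/123456-13-0-abcdef-all.zip',
#     '123457-master-fedcba-all')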
+ # TODO active test false and take last done/running build limit 1 -> in case of rebuild + return refs_builds + + def _is_upgrade_step(self): + return self.job_type in ('configure_upgrade', 'configure_upgrade_complement') + + def _reference_batches(self, bundle, trigger): + if self.job_type == 'configure_upgrade_complement': + return self._reference_batches_complement(bundle, trigger) + else: + return self._reference_batches_upgrade(bundle, trigger.upgrade_dumps_trigger_id.category_id.id) + + def _reference_batches_complement(self, bundle, trigger): + category_id = trigger.upgrade_dumps_trigger_id.category_id.id + version = bundle.version_id + next_versions = version.next_major_version_id | version.next_intermediate_version_ids # TODO filter on trigger version + target_versions = version.browse() + + upgrade_complement_step = trigger.upgrade_dumps_trigger_id.upgrade_step_id + + if next_versions: + for next_version in next_versions: + if bundle.version_id in upgrade_complement_step._get_upgrade_source_versions(next_version): + target_versions |= next_version + return target_versions.with_context( + category_id=category_id, project_id=bundle.project_id.id + ).mapped('base_bundle_id.last_done_batch') + + def _reference_batches_upgrade(self, bundle, category_id): + target_refs_bundles = self.env['runbot.bundle'] + sticky_domain = [('sticky', '=', True), ('project_id', '=', bundle.project_id.id)] + if self.upgrade_to_version_ids: + target_refs_bundles |= self.env['runbot.bundle'].search(sticky_domain + [('version_id', 'in', self.upgrade_to_version_ids.ids)]) + else: + if self.upgrade_to_master: + target_refs_bundles |= self.env['runbot.bundle'].search(sticky_domain + [('name', '=', 'master')]) + if self.upgrade_to_all_versions: + target_refs_bundles |= self.env['runbot.bundle'].search(sticky_domain + [('name', '!=', 'master')]) + elif self.upgrade_to_major_versions: + target_refs_bundles |= self.env['runbot.bundle'].search(sticky_domain + [('name', '!=', 'master'), ('version_id.is_major', '=', True)]) + + source_refs_bundles = self.env['runbot.bundle'] + + def from_versions(f_bundle): + nonlocal source_refs_bundles + if self.upgrade_from_previous_major_version: + source_refs_bundles |= f_bundle.previous_major_version_base_id + if self.upgrade_from_all_intermediate_version: + source_refs_bundles |= f_bundle.intermediate_version_base_ids + elif self.upgrade_from_last_intermediate_version: + if f_bundle.intermediate_version_base_ids: + source_refs_bundles |= f_bundle.intermediate_version_base_ids[-1] + + if self.upgrade_from_version_ids: + source_refs_bundles |= self.env['runbot.bundle'].search(sticky_domain + [('version_id', 'in', self.upgrade_from_version_ids.ids)]) + # this is subject to discussion. 
should this be smart and filter 'from_versions' or should it be flexible and do all possibilities + else: + if self.upgrade_to_current: + from_versions(bundle) + for f_bundle in target_refs_bundles: + from_versions(f_bundle) + + return (target_refs_bundles | source_refs_bundles).with_context( + category_id=category_id + ).mapped('last_done_batch') + def log_end(self, build): if self.job_type == 'create_build': build._logger('Step %s finished in %s' % (self.name, s2human(build.job_time))) @@ -421,7 +781,7 @@ class ConfigStep(models.Model): kwargs = dict(message='Step %s finished in %s' % (self.name, s2human(build.job_time))) if self.job_type == 'install_odoo': kwargs['message'] += ' $$fa-download$$' - db_suffix = build.config_data.get('db_name') or self.db_name + db_suffix = build.params_id.config_data.get('db_name') or self.db_name kwargs['path'] = '%s%s-%s.zip' % (build.http_log_url(), build.dest, db_suffix) kwargs['log_type'] = 'link' build._log('', **kwargs) @@ -461,11 +821,11 @@ class ConfigStep(models.Model): def _coverage_params(self, build, modules_to_install): pattern_to_omit = set() - for commit in build._get_all_commit(): + for commit in build.params_id.commit_ids: docker_source_folder = build._docker_source_folder(commit) - for manifest_file in commit.repo.manifest_files.split(','): + for manifest_file in commit.repo_id.manifest_files.split(','): pattern_to_omit.add('*%s' % manifest_file) - for (addons_path, module, _) in build._get_available_modules(commit): + for (addons_path, module, _) in commit._get_available_modules(): if module not in modules_to_install: # we want to omit docker_source_folder/[addons/path/]module/* module_path_in_docker = os.path.join(docker_source_folder, addons_path, module) @@ -484,6 +844,8 @@ class ConfigStep(models.Model): build_values.update(self._make_coverage_results(build)) if self.test_enable or self.test_tags: build_values.update(self._make_tests_results(build)) + elif self.job_type == 'test_upgrade': + build_values.update(self._make_upgrade_results(build)) return build_values def _make_python_results(self, build): @@ -511,6 +873,35 @@ class ConfigStep(models.Model): build._log('coverage_result', 'Coverage file not found', level='WARNING') return build_values + def _make_upgrade_results(self, build): + build_values = {} + build._log('upgrade', 'Getting results for build %s' % build.dest) + + if build.local_result != 'ko': + checkers = [ + self._check_log, + self._check_module_loaded, + self._check_error, + self._check_module_states, + self._check_build_ended, + self._check_warning, + ] + local_result = self._get_checkers_result(build, checkers) + build_values['local_result'] = build._get_worst_result([build.local_result, local_result]) + + return build_values + + def _check_module_states(self, build): + if not build.is_file('logs/modules_states.txt'): + build._log('', '"logs/modules_states.txt" file not found.', level='ERROR') + return 'ko' + + content = build.read_file('logs/modules_states.txt') or '' + if '(0 rows)' not in content: + build._log('', 'Some modules are not in installed/uninstalled/uninstallable state after migration. 
\n %s' % content) + return 'ko' + return 'ok' + def _check_log(self, build): log_path = build._path('logs', '%s.txt' % self.name) if not os.path.isfile(log_path): @@ -577,7 +968,7 @@ class ConfigStep(models.Model): return build_values def _make_stats(self, build): - if not ((build.branch_id.make_stats or build.config_data.get('make_stats')) and self.make_stats): + if not self.make_stats: # TODO garbage collect non sticky stat return build._log('make_stats', 'Getting stats from log file') log_path = build._path('logs', '%s.txt' % self.name) @@ -592,6 +983,7 @@ class ConfigStep(models.Model): self.env['runbot.build.stat']._write_key_values(build, self, key_values) except Exception as e: message = '**An error occured while computing statistics of %s:**\n`%s`' % (build.job, str(e).replace('\\n', '\n').replace("\\'", "'")) + _logger.exception(message) build._log('make_stats', message, level='INFO', log_type='markdown') def _step_state(self): diff --git a/runbot/models/build_dependency.py b/runbot/models/build_dependency.py deleted file mode 100644 index 0c8090dd..00000000 --- a/runbot/models/build_dependency.py +++ /dev/null @@ -1,16 +0,0 @@ -from odoo import models, fields - - -class RunbotBuildDependency(models.Model): - _name = "runbot.build.dependency" - _description = "Build dependency" - - build_id = fields.Many2one('runbot.build', 'Build', required=True, ondelete='cascade', index=True) - dependecy_repo_id = fields.Many2one('runbot.repo', 'Dependency repo', required=True, ondelete='cascade') - dependency_hash = fields.Char('Name of commit', index=True) - closest_branch_id = fields.Many2one('runbot.branch', 'Branch', ondelete='cascade') - match_type = fields.Char('Match Type') - - def _get_repo(self): - return self.closest_branch_id.repo_id or self.dependecy_repo_id - diff --git a/runbot/models/build_error.py b/runbot/models/build_error.py index 57b8b28a..394e6f17 100644 --- a/runbot/models/build_error.py +++ b/runbot/models/build_error.py @@ -10,7 +10,7 @@ from odoo.exceptions import ValidationError _logger = logging.getLogger(__name__) -class RunbotBuildError(models.Model): +class BuildError(models.Model): _name = "runbot.build.error" _description = "Build error" @@ -24,16 +24,16 @@ class RunbotBuildError(models.Model): module_name = fields.Char('Module name') # name in ir_logging function = fields.Char('Function name') # func name in ir logging fingerprint = fields.Char('Error fingerprint', index=True) - random = fields.Boolean('underterministic error', track_visibility='onchange') - responsible = fields.Many2one('res.users', 'Assigned fixer', track_visibility='onchange') - fixing_commit = fields.Char('Fixing commit', track_visibility='onchange') + random = fields.Boolean('underterministic error', tracking=True) + responsible = fields.Many2one('res.users', 'Assigned fixer', tracking=True) + fixing_commit = fields.Char('Fixing commit', tracking=True) build_ids = fields.Many2many('runbot.build', 'runbot_build_error_ids_runbot_build_rel', string='Affected builds') - branch_ids = fields.Many2many('runbot.branch', compute='_compute_branch_ids') - repo_ids = fields.Many2many('runbot.repo', compute='_compute_repo_ids') - active = fields.Boolean('Error is not fixed', default=True, track_visibility='onchange') + bundle_ids = fields.One2many('runbot.bundle', compute='_compute_bundle_ids') + trigger_ids = fields.Many2many('runbot.trigger', compute='_compute_trigger_ids') + active = fields.Boolean('Error is not fixed', default=True, tracking=True) tag_ids = 
fields.Many2many('runbot.build.error.tag', string='Tags') - build_count = fields.Integer(compute='_compute_build_counts', string='Nb seen', stored=True) - parent_id = fields.Many2one('runbot.build.error', 'Linked to') + build_count = fields.Integer(compute='_compute_build_counts', string='Nb seen') + parent_id = fields.Many2one('runbot.build.error', 'Linked to', index=True) child_ids = fields.One2many('runbot.build.error', 'parent_id', string='Child Errors', context={'active_test': False}) children_build_ids = fields.Many2many('runbot.build', compute='_compute_children_build_ids', string='Children builds') error_history_ids = fields.Many2many('runbot.build.error', compute='_compute_error_history_ids', string='Old errors', context={'active_test': False}) @@ -63,7 +63,7 @@ class RunbotBuildError(models.Model): if 'active' in vals: for build_error in self: (build_error.child_ids - self).write({'active': vals['active']}) - return super(RunbotBuildError, self).write(vals) + return super(BuildError, self).write(vals) @api.depends('build_ids') def _compute_build_counts(self): @@ -71,14 +71,15 @@ class RunbotBuildError(models.Model): build_error.build_count = len(build_error.children_build_ids) @api.depends('build_ids') - def _compute_branch_ids(self): + def _compute_bundle_ids(self): for build_error in self: - build_error.branch_ids = build_error.mapped('build_ids.branch_id') + top_parent_builds = build_error.build_ids.mapped(lambda rec: rec and rec._get_top_parent()) + build_error.bundle_ids = top_parent_builds.mapped('slot_ids').mapped('batch_id.bundle_id') @api.depends('build_ids') - def _compute_repo_ids(self): + def _compute_trigger_ids(self): for build_error in self: - build_error.repo_ids = build_error.mapped('build_ids.repo_id') + build_error.trigger_ids = build_error.mapped('build_ids.params_id.trigger_id') @api.depends('content') def _compute_summary(self): @@ -134,7 +135,6 @@ class RunbotBuildError(models.Model): build.build_error_ids += build_error del hash_dict[build_error.fingerprint] - fixed_errors_dict = {rec.fingerprint: rec for rec in self.env['runbot.build.error'].search([('fingerprint', 'in', list(hash_dict.keys())), ('active', '=', False)])} # create an error for the remaining entries for fingerprint, logs in hash_dict.items(): build_error = self.env['runbot.build.error'].create({ @@ -161,7 +161,7 @@ class RunbotBuildError(models.Model): @api.model def test_tags_list(self): - active_errors = self.search([('test_tags', '!=', False), ('random', '=', True)]) + active_errors = self.search([('test_tags', '!=', False)]) test_tag_list = active_errors.mapped('test_tags') return [test_tag for error_tags in test_tag_list for test_tag in (error_tags).split(',')] @@ -170,7 +170,7 @@ class RunbotBuildError(models.Model): return ['-%s' % tag for tag in self.test_tags_list()] -class RunbotBuildErrorTag(models.Model): +class BuildErrorTag(models.Model): _name = "runbot.build.error.tag" _description = "Build error tag" @@ -179,7 +179,7 @@ class RunbotBuildErrorTag(models.Model): error_ids = fields.Many2many('runbot.build.error', string='Errors') -class RunbotErrorRegex(models.Model): +class ErrorRegex(models.Model): _name = "runbot.error.regex" _description = "Build error regex" diff --git a/runbot/models/build_stat.py b/runbot/models/build_stat.py index ea41b47a..fd7f5188 100644 --- a/runbot/models/build_stat.py +++ b/runbot/models/build_stat.py @@ -5,7 +5,7 @@ from odoo import models, fields, api, tools _logger = logging.getLogger(__name__) -class RunbotBuildStat(models.Model): +class 
BuildStat(models.Model): _name = "runbot.build.stat" _description = "Statistics" _sql_constraints = [ @@ -45,54 +45,64 @@ class RunbotBuildStatSql(models.Model): _description = "Build stat sql view" _auto = False - id = fields.Many2one("runbot.build.stat", readonly=True) + bundle_id = fields.Many2one("runbot.bundle", string="Bundle", readonly=True) + bundle_name = fields.Char(string="Bundle name", readonly=True) + bundle_sticky = fields.Boolean(string="Sticky", readonly=True) + batch_id = fields.Many2one("runbot.bundle", string="Batch", readonly=True) + trigger_id = fields.Many2one("runbot.trigger", string="Trigger", readonly=True) + trigger_name = fields.Char(string="Trigger name", readonly=True) + + stat_id = fields.Many2one("runbot.build.stat", string="Stat", readonly=True) key = fields.Char("Key", readonly=True) value = fields.Float("Value", readonly=True) + config_step_id = fields.Many2one( "runbot.build.config.step", string="Config Step", readonly=True ) config_step_name = fields.Char(String="Config Step name", readonly=True) + build_id = fields.Many2one("runbot.build", string="Build", readonly=True) build_config_id = fields.Many2one("runbot.build.config", string="Config", readonly=True) - build_name = fields.Char(String="Build name", readonly=True) build_parent_path = fields.Char('Build Parent path') build_host = fields.Char(string="Host", readonly=True) - branch_id = fields.Many2one("runbot.branch", string="Branch", readonly=True) - branch_name = fields.Char(string="Branch name", readonly=True) - branch_sticky = fields.Boolean(string="Sticky", readonly=True) - repo_id = fields.Many2one("runbot.repo", string="Repo", readonly=True) - repo_name = fields.Char(string="Repo name", readonly=True) def init(self): """ Create SQL view for build stat """ tools.drop_view_if_exists(self._cr, "runbot_build_stat_sql") self._cr.execute( - """ CREATE VIEW runbot_build_stat_sql AS ( + """ CREATE OR REPLACE VIEW runbot_build_stat_sql AS ( SELECT - stat.id AS id, + (stat.id::bigint*(2^32)+bun.id::bigint) AS id, + stat.id AS stat_id, stat.key AS key, stat.value AS value, step.id AS config_step_id, step.name AS config_step_name, bu.id AS build_id, - bu.config_id AS build_config_id, + bp.config_id AS build_config_id, bu.parent_path AS build_parent_path, - bu.name AS build_name, bu.host AS build_host, - br.id AS branch_id, - br.branch_name AS branch_name, - br.sticky AS branch_sticky, - repo.id AS repo_id, - repo.name AS repo_name + bun.id AS bundle_id, + bun.name AS bundle_name, + bun.sticky AS bundle_sticky, + ba.id AS batch_id, + tr.id AS trigger_id, + tr.name AS trigger_name FROM runbot_build_stat AS stat JOIN runbot_build_config_step step ON stat.config_step_id = step.id JOIN - runbot_build bu ON stat.build_id = bu.id + runbot_build bu ON bu.id = stat.build_id JOIN - runbot_branch br ON br.id = bu.branch_id + runbot_build_params bp ON bp.id =bu.params_id JOIN - runbot_repo repo ON br.repo_id = repo.id + runbot_batch_slot bas ON bas.build_id = stat.build_id + JOIN + runbot_trigger tr ON tr.id = bas.trigger_id + JOIN + runbot_batch ba ON ba.id = bas.batch_id + JOIN + runbot_bundle bun ON bun.id = ba.bundle_id )""" - ) + ) \ No newline at end of file diff --git a/runbot/models/build_stat_regex.py b/runbot/models/build_stat_regex.py index d6246ef4..73ff935c 100644 --- a/runbot/models/build_stat_regex.py +++ b/runbot/models/build_stat_regex.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- import logging -import os + +from ..common import os import re from odoo import models, fields, api @@ -11,7 +12,7 @@ 
VALUE_PATTERN = r"\(\?P\.+\)" # used to verify value group pattern _logger = logging.getLogger(__name__) -class RunbotBuildStatRegex(models.Model): +class BuildStatRegex(models.Model): """ A regular expression to extract a float/int value from a log file The regulare should contain a named group like '(?P.+)'. The result will be a key/value like {name: value} @@ -59,8 +60,8 @@ class RunbotBuildStatRegex(models.Model): value = float(group_dict.get("value")) except ValueError: _logger.warning( - 'The matched value (%s) of "%s" cannot be converted into float' - % (group_dict.get("value"), build_stat_regex.regex) + 'The matched value (%s) of "%s" cannot be converted into float', + group_dict.get("value"), build_stat_regex.regex ) continue key = ( diff --git a/runbot/models/bundle.py b/runbot/models/bundle.py new file mode 100644 index 00000000..9b4830a2 --- /dev/null +++ b/runbot/models/bundle.py @@ -0,0 +1,222 @@ +import time +import logging +import datetime +import subprocess + +from collections import defaultdict +from odoo import models, fields, api, tools +from ..common import dt2time, s2human_long + +_logger = logging.getLogger(__name__) + + +class Bundle(models.Model): + _name = 'runbot.bundle' + _description = "Bundle" + + name = fields.Char('Bundle name', required=True, help="Name of the base branch") + project_id = fields.Many2one('runbot.project', required=True, index=True) + branch_ids = fields.One2many('runbot.branch', 'bundle_id') + + # custom behaviour + no_build = fields.Boolean('No build') + no_auto_run = fields.Boolean('No run') + build_all = fields.Boolean('Force all triggers') + modules = fields.Char("Modules to install", help="Comma-separated list of modules to install and test.") + + batch_ids = fields.One2many('runbot.batch', 'bundle_id') + last_batch = fields.Many2one('runbot.batch', index=True, domain=lambda self: [('category_id', '=', self.env.ref('runbot.default_category').id)]) + last_batchs = fields.Many2many('runbot.batch', 'Last batchs', compute='_compute_last_batchs') + last_done_batch = fields.Many2many('runbot.batch', 'Last batchs', compute='_compute_last_done_batch') + + sticky = fields.Boolean('Sticky', compute='_compute_sticky', store=True, index=True) + is_base = fields.Boolean('Is base', index=True) + defined_base_id = fields.Many2one('runbot.bundle', 'Forced base bundle', domain="[('project_id', '=', project_id), ('is_base', '=', True)]") + base_id = fields.Many2one('runbot.bundle', 'Base bundle', compute='_compute_base_id', store=True) + + version_id = fields.Many2one('runbot.version', 'Version', compute='_compute_version_id', store=True) + version_number = fields.Char(related='version_id.number', store=True, index=True) + + previous_major_version_base_id = fields.Many2one('runbot.bundle', 'Previous base bundle', compute='_compute_relations_base_id') + intermediate_version_base_ids = fields.Many2many('runbot.bundle', 'Intermediate base bundles', compute='_compute_relations_base_id') + + priority = fields.Boolean('Build priority', default=False) + + trigger_custom_ids = fields.One2many('runbot.bundle.trigger.custom', 'bundle_id') + auto_rebase = fields.Boolean('Auto rebase', default=False) + + @api.depends('sticky') + def _compute_make_stats(self): + for bundle in self: + bundle.make_stats = bundle.sticky + + @api.depends('is_base') + def _compute_sticky(self): + for bundle in self: + bundle.sticky = bundle.is_base + + @api.depends('name', 'is_base', 'defined_base_id', 'base_id.is_base', 'project_id') + def _compute_base_id(self): + for bundle in self: + 
if bundle.is_base: + bundle.base_id = bundle + continue + if bundle.defined_base_id: + bundle.base_id = bundle.defined_base_id + continue + project_id = bundle.project_id.id + master_base = False + for bid, bname in self._get_base_ids(project_id): + if bundle.name.startswith('%s-' % bname): + bundle.base_id = self.browse(bid) + break + elif bname == 'master': + master_base = self.browse(bid) + else: + bundle.base_id = master_base + + @tools.ormcache('project_id') + def _get_base_ids(self, project_id): + return [(b.id, b.name) for b in self.search([('is_base', '=', True), ('project_id', '=', project_id)])] + + @api.depends('is_base', 'base_id.version_id') + def _compute_version_id(self): + for bundle in self.sorted(key='is_base', reverse=True): + if not bundle.is_base: + bundle.version_id = bundle.base_id.version_id + continue + bundle.version_id = self.env['runbot.version']._get(bundle.name) + + @api.depends('version_id') + def _compute_relations_base_id(self): + for bundle in self: + bundle = bundle.with_context(project_id=bundle.project_id.id) + bundle.previous_major_version_base_id = bundle.version_id.previous_major_version_id.base_bundle_id + bundle.intermediate_version_base_ids = bundle.version_id.intermediate_version_ids.mapped('base_bundle_id') + + @api.depends_context('category_id') + def _compute_last_batchs(self): + if self: + batch_ids = defaultdict(list) + category_id = self.env.context.get('category_id', self.env['ir.model.data'].xmlid_to_res_id('runbot.default_category')) + self.env.cr.execute(""" + SELECT + id + FROM ( + SELECT + batch.id AS id, + row_number() OVER (PARTITION BY batch.bundle_id order by batch.id desc) AS row + FROM + runbot_bundle bundle INNER JOIN runbot_batch batch ON bundle.id=batch.bundle_id + WHERE + bundle.id in %s + AND batch.category_id = %s + ) AS bundle_batch + WHERE + row <= 4 + ORDER BY row, id desc + """, [tuple(self.ids), category_id] + ) + batchs = self.env['runbot.batch'].browse([r[0] for r in self.env.cr.fetchall()]) + for batch in batchs: + batch_ids[batch.bundle_id.id].append(batch.id) + + for bundle in self: + bundle.last_batchs = [(6, 0, batch_ids[bundle.id])] + + @api.depends_context('category_id') + def _compute_last_done_batch(self): + if self: + # self.env['runbot.batch'].flush() + for bundle in self: + bundle.last_done_batch = False + category_id = self.env.context.get('category_id', self.env['ir.model.data'].xmlid_to_res_id('runbot.default_category')) + self.env.cr.execute(""" + SELECT + id + FROM ( + SELECT + batch.id AS id, + row_number() OVER (PARTITION BY batch.bundle_id order by batch.id desc) AS row + FROM + runbot_bundle bundle INNER JOIN runbot_batch batch ON bundle.id=batch.bundle_id + WHERE + bundle.id in %s + AND batch.state = 'done' + AND batch.category_id = %s + ) AS bundle_batch + WHERE + row = 1 + ORDER BY row, id desc + """, [tuple(self.ids), category_id] + ) + batchs = self.env['runbot.batch'].browse([r[0] for r in self.env.cr.fetchall()]) + for batch in batchs: + batch.bundle_id.last_done_batch = batch + + def create(self, values_list): + res = super().create(values_list) + if res.is_base: + model = self.browse() + model._get_base_ids.clear_cache(model) + return res + + def write(self, values): + super().write(values) + if 'is_base' in values: + model = self.browse() + model._get_base_ids.clear_cache(model) + + def _force(self, category_id=None, auto_rebase=False): + self.ensure_one() + if self.last_batch.state == 'preparing': + return + values = { + 'last_update': fields.Datetime.now(), + 'bundle_id': self.id, + 
'state': 'preparing', + } + if category_id: + values['category_id'] = category_id + new = self.env['runbot.batch'].create(values) + self.last_batch = new + new.sudo()._prepare(auto_rebase or self.auto_rebase) + return new + + def consistency_warning(self): + if self.defined_base_id: + return [('info', 'This bundle has a forced base: %s' % self.defined_base_id.name)] + warnings = [] + for branch in self.branch_ids: + if branch.is_pr and branch.target_branch_name != self.base_id.name: + if branch.target_branch_name.startswith(self.base_id.name): + warnings.append(('info', 'PR %s targeting a non base branch: %s' % (branch.dname, branch.target_branch_name))) + else: + warnings.append(('warning' if branch.alive else 'info', 'PR %s targeting wrong version: %s (expecting %s)' % (branch.dname, branch.target_branch_name, self.base_id.name))) + elif not branch.is_pr and not branch.name.startswith(self.base_id.name) and not self.defined_base_id: + warnings.append(('warning', 'Branch %s not starting with version name (%s)' % (branch.dname, self.base_id.name))) + return warnings + + def branch_groups(self): + self.branch_ids.sorted(key=lambda b: (b.remote_id.repo_id.sequence, b.remote_id.repo_id.id, b.is_pr)) + branch_groups = {repo: [] for repo in self.branch_ids.mapped('remote_id.repo_id').sorted('sequence')} + for branch in self.branch_ids.sorted(key=lambda b: (b.is_pr)): + branch_groups[branch.remote_id.repo_id].append(branch) + return branch_groups + + +class BundleTriggerCustomisation(models.Model): + _name = 'runbot.bundle.trigger.custom' + _description = 'Custom trigger' + + trigger_id = fields.Many2one('runbot.trigger', domain="[('project_id', '=', bundle_id.project_id)]") + bundle_id = fields.Many2one('runbot.bundle') + config_id = fields.Many2one('runbot.build.config') + + _sql_constraints = [ + ( + "bundle_custom_trigger_unique", + "unique (bundle_id, trigger_id)", + "Only one custom trigger per trigger per bundle is allowed", + ) + ] diff --git a/runbot/models/commit.py b/runbot/models/commit.py new file mode 100644 index 00000000..fea0ee2b --- /dev/null +++ b/runbot/models/commit.py @@ -0,0 +1,226 @@ + +import subprocess + +from ..common import os, RunbotException +import glob +import shutil + +from odoo import models, fields, api, registry +import logging + +_logger = logging.getLogger(__name__) + + +class Commit(models.Model): + _name = 'runbot.commit' + _description = "Commit" + + _sql_constraints = [ + ( + "commit_unique", + "unique (name, repo_id, rebase_on_id)", + "Commit must be unique to ensure correct duplicate matching", + ) + ] + name = fields.Char('SHA') + repo_id = fields.Many2one('runbot.repo', string='Repo group') + date = fields.Datetime('Commit date') + author = fields.Char('Author') + author_email = fields.Char('Author Email') + committer = fields.Char('Committer') + committer_email = fields.Char('Committer Email') + subject = fields.Text('Subject') + dname = fields.Char('Display name', compute='_compute_dname') + rebase_on_id = fields.Many2one('runbot.commit', 'Rebase on commit') + + def _get(self, name, repo_id, vals=None, rebase_on_id=False): + commit = self.search([('name', '=', name), ('repo_id', '=', repo_id), ('rebase_on_id', '=', rebase_on_id)]) + if not commit: + commit = self.env['runbot.commit'].create({**(vals or {}), 'name': name, 'repo_id': repo_id, 'rebase_on_id': rebase_on_id}) + return commit + + def _rebase_on(self, commit): + if self == commit: + return self + return self._get(self.name, self.repo_id.id, self.read()[0], commit.id) + + def 
_get_available_modules(self): + for manifest_file_name in self.repo_id.manifest_files.split(','): # '__manifest__.py' '__openerp__.py' + for addons_path in (self.repo_id.addons_paths or '').split(','): # '' 'addons' 'odoo/addons' + sep = os.path.join(addons_path, '*') + for manifest_path in glob.glob(self._source_path(sep, manifest_file_name)): + module = os.path.basename(os.path.dirname(manifest_path)) + yield (addons_path, module, manifest_file_name) + + def export(self): + """Export a git repo into a sources""" + # TODO add automated tests + self.ensure_one() + + export_path = self._source_path() + + if os.path.isdir(export_path): + _logger.info('git export: exporting to %s (already exists)', export_path) + return export_path + + + _logger.info('git export: exporting to %s (new)', export_path) + os.makedirs(export_path) + + self.repo_id._fetch(self.name) + export_sha = self.name + if self.rebase_on_id: + export_sha = self.rebase_on_id.name + self.rebase_on_id.repo_id._fetch(export_sha) + + p1 = subprocess.Popen(['git', '--git-dir=%s' % self.repo_id.path, 'archive', export_sha], stderr=subprocess.PIPE, stdout=subprocess.PIPE) + p2 = subprocess.Popen(['tar', '-xmC', export_path], stdin=p1.stdout, stdout=subprocess.PIPE) + p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits. + (_, err) = p2.communicate() + p1.poll() # fill the returncode + if p1.returncode: + raise RunbotException("Git archive failed for %s with error code %s. (%s)" % (self.name, p1.returncode, p1.stderr.read().decode())) + if err: + raise RunbotException("Export for %s failed. (%s)" % (self.name, err)) + + if self.rebase_on_id: + # we could be smart here and detect if merge_base == commit, in witch case checkouting base_commit is enough. Since we don't have this info + # and we are exporting in a custom folder anyway, lets + _logger.info('Applying patch for %s', self.name) + p1 = subprocess.Popen(['git', '--git-dir=%s' % self.repo_id.path, 'diff', '%s...%s' % (export_sha, self.name)], stderr=subprocess.PIPE, stdout=subprocess.PIPE) + p2 = subprocess.Popen(['patch', '-p0', '-d', export_path], stdin=p1.stdout, stdout=subprocess.PIPE) + p1.stdout.close() + (message, err) = p2.communicate() + p1.poll() + if err: + shutil.rmtree(export_path) + raise RunbotException("Apply patch failed for %s...%s. (%s)" % (export_sha, self.name, err)) + if p1.returncode or p2.returncode: + shutil.rmtree(export_path) + raise RunbotException("Apply patch failed for %s...%s with error code %s+%s. 
(%s)" % (export_sha, self.name, p1.returncode, p2.returncode, message)) + + # migration scripts link if necessary + icp = self.env['ir.config_parameter'] + ln_param = icp.get_param('runbot_migration_ln', default='') + migration_repo_id = int(icp.get_param('runbot_migration_repo_id', default=0)) + if ln_param and migration_repo_id and self.repo_id.server_files: + scripts_dir = self.env['runbot.repo'].browse(migration_repo_id).name + try: + os.symlink('/data/build/%s' % scripts_dir, self._source_path(ln_param)) + except FileNotFoundError: + _logger.warning('Impossible to create migration symlink') + + return export_path + + def read_source(self, file, mode='r'): + file_path = self._source_path(file) + try: + with open(file_path, mode) as f: + return f.read() + except: + return False + + def _source_path(self, *path): + export_name = self.name + if self.rebase_on_id: + export_name = '%s_%s' % (self.name, self.rebase_on_id.name) + return os.path.join(self.env['runbot.runbot']._root(), 'sources', self.repo_id.name, export_name, *path) + + @api.depends('name', 'repo_id.name') + def _compute_dname(self): + for commit in self: + commit.dname = '%s:%s' % (commit.repo_id.name, commit.name[:8]) + + def _github_status(self, build, context, state, target_url, description=None, post_commit=True): + self.ensure_one() + Status = self.env['runbot.commit.status'] + last_status = Status.search([('commit_id', '=', self.id), ('context', '=', context)], order='id desc', limit=1) + if last_status and last_status.state == state: + _logger.info('Skipping already sent status %s:%s for %s', context, state, self.name) + return + last_status = Status.create({ + 'build_id': build.id if build else False, + 'commit_id': self.id, + 'context': context, + 'state': state, + 'target_url': target_url, + 'description': description or context, + }) + last_status._send(post_commit) + + +class CommitLink(models.Model): + _name = 'runbot.commit.link' + _description = "Build commit" + + commit_id = fields.Many2one('runbot.commit', 'Commit', required=True, index=True) + # Link info + match_type = fields.Selection([('new', 'New head of branch'), ('head', 'Head of branch'), ('base_head', 'Found on base branch'), ('base_match', 'Found on base branch')]) # HEAD, DEFAULT + branch_id = fields.Many2one('runbot.branch', string='Found in branch') # Shouldn't be use for anything else than display + + base_commit_id = fields.Many2one('runbot.commit', 'Base head commit', index=True) + merge_base_commit_id = fields.Many2one('runbot.commit', 'Merge Base commit', index=True) + base_behind = fields.Integer('# commits behind base') + base_ahead = fields.Integer('# commits ahead base') + file_changed = fields.Integer('# file changed') + diff_add = fields.Integer('# line added') + diff_remove = fields.Integer('# line removed') + + +class CommitStatus(models.Model): + _name = 'runbot.commit.status' + _description = 'Commit status' + _order = 'id desc' + + commit_id = fields.Many2one('runbot.commit', string='Commit', required=True, index=True) + context = fields.Char('Context', required=True) + state = fields.Char('State', required=True) + build_id = fields.Many2one('runbot.build', string='Build', index=True) + target_url = fields.Char('Url') + description = fields.Char('Description') + sent_date = fields.Datetime('Sent Date') + + def _send(self, post_commit=True): + user_id = self.env.user.id + _dbname = self.env.cr.dbname + _context = self.env.context + + status_id = self.id + commit = self.commit_id + all_remote = commit.repo_id.remote_ids + remotes = 
all_remote.filtered(lambda remote: remote.token) + no_token_remote = all_remote-remotes + if no_token_remote: + _logger.warning('No token on remote %s, skipping status', no_token_remote.mapped("name")) + remote_ids = remotes.ids + commit_name = commit.name + + status = { + 'context': self.context, + 'state': self.state, + 'target_url': self.target_url, + 'description': self.description, + } + if remote_ids: + + def send_github_status(env): + for remote in env['runbot.remote'].browse(remote_ids): + _logger.debug( + "github updating %s status %s to %s in repo %s", + status['context'], commit_name, status['state'], remote.name) + remote._github('/repos/:owner/:repo/statuses/%s' % commit_name, status, ignore_errors=True) + env['runbot.commit.status'].browse(status_id).sent_date = fields.Datetime.now() + + def send_github_status_async(): + try: + db_registry = registry(_dbname) + with api.Environment.manage(), db_registry.cursor() as cr: + env = api.Environment(cr, user_id, _context) + send_github_status(env) + except: + _logger.exception('Something went wrong sending notification for %s', commit_name) + + if post_commit: + self._cr.after('commit', send_github_status_async) + else: + send_github_status(self.env) diff --git a/runbot/models/database.py b/runbot/models/database.py new file mode 100644 index 00000000..87ff69a2 --- /dev/null +++ b/runbot/models/database.py @@ -0,0 +1,23 @@ +import logging +from odoo import models, fields, api +_logger = logging.getLogger(__name__) + + +class Database(models.Model): + _name = 'runbot.database' + _description = "Database" + + name = fields.Char('Host name', required=True, unique=True) + build_id = fields.Many2one('runbot.build', index=True, required=True) + db_suffix = fields.Char(compute='_compute_db_suffix') + + def _compute_db_suffix(self): + for record in self: + record.db_suffix = record.name.replace('%s-' % record.build_id.dest, '') + + @api.model_create_single + def create(self, values): + res = self.search([('name', '=', values['name']), ('build_id', '=', values['build_id'])]) + if res: + return res + return super().create(values) diff --git a/runbot/models/event.py b/runbot/models/event.py index 95c32be2..a069453d 100644 --- a/runbot/models/event.py +++ b/runbot/models/event.py @@ -80,7 +80,7 @@ FOR EACH ROW EXECUTE PROCEDURE runbot_set_logging_build(); class RunbotErrorLog(models.Model): - _name = "runbot.error.log" + _name = 'runbot.error.log' _description = "Error log" _auto = False _order = 'id desc' @@ -95,30 +95,23 @@ class RunbotErrorLog(models.Model): path = fields.Char(string='Path', readonly=True) line = fields.Char(string='Line', readonly=True) build_id = fields.Many2one('runbot.build', string='Build', readonly=True) - bu_name = fields.Char(String='Build name', readonly=True) + #bu_name = fields.Char(String='Build name', readonly=True) as aggregate dest = fields.Char(String='Build dest', readonly=True) local_state = fields.Char(string='Local state', readonly=True) local_result = fields.Char(string='Local result', readonly=True) global_state = fields.Char(string='Global state', readonly=True) global_result = fields.Char(string='Global result', readonly=True) bu_create_date = fields.Datetime(string='Build create date', readonly=True) - committer = fields.Char(string='committer', readonly=True) - author = fields.Char(string='Author', readonly=True) host = fields.Char(string='Host', readonly=True) - config_id = fields.Many2one('runbot.build.config', string='Config', readonly=True) parent_id = fields.Many2one('runbot.build', 
string='Parent build', readonly=True) - hidden = fields.Boolean(string='Hidden', readonly=True) - branch_id = fields.Many2one('runbot.branch', string='Branch', readonly=True) - branch_name = fields.Char(string='Branch name', readonly=True) - branch_sticky = fields.Boolean(string='Sticky', readonly=True) - repo_id = fields.Many2one('runbot.repo', string='Repo', readonly=True) - repo_name = fields.Char(string='Repo name', readonly=True) - repo_short_name = fields.Char(compute='_compute_repo_short_name', readonly=True) + #bundle_id = fields.Many2one('runbot.bundle', string='Bundle', readonly=True) + #bundle_name = fields.Char(string='Bundle name', readonly=True) + #bundle_sticky = fields.Boolean(string='Sticky', readonly=True) build_url = fields.Char(compute='_compute_build_url', readonly=True) def _compute_repo_short_name(self): for l in self: - l.repo_short_name = '/'.join(l.repo_id.base.split('/')[-2:]) + l.repo_short_name = '%s/%s' % (l.repo_id.owner, l.repo_id.repo_name) def _compute_build_url(self): for l in self: @@ -152,32 +145,18 @@ class RunbotErrorLog(models.Model): l.path AS path, l.line AS line, bu.id AS build_id, - bu.name AS bu_name, bu.dest AS dest, bu.local_state AS local_state, bu.local_result AS local_result, bu.global_state AS global_state, bu.global_result AS global_result, bu.create_date AS bu_create_date, - bu.committer AS committer, - bu.author AS author, bu.host AS host, - bu.config_id AS config_id, - bu.parent_id AS parent_id, - bu.hidden AS hidden, - br.id AS branch_id, - br.branch_name AS branch_name, - br.sticky AS branch_sticky, - re.id AS repo_id, - re.name AS repo_name + bu.parent_id AS parent_id FROM ir_logging AS l JOIN runbot_build bu ON l.build_id = bu.id - JOIN - runbot_branch br ON br.id = bu.branch_id - JOIN - runbot_repo re ON br.repo_id = re.id WHERE l.level = 'ERROR' )""") diff --git a/runbot/models/host.py b/runbot/models/host.py index 347a80f9..266030e9 100644 --- a/runbot/models/host.py +++ b/runbot/models/host.py @@ -1,26 +1,28 @@ import logging -import os - from odoo import models, fields, api -from ..common import fqdn, local_pgadmin_cursor +from ..common import fqdn, local_pgadmin_cursor, os from ..container import docker_build _logger = logging.getLogger(__name__) -class RunboHost(models.Model): - _name = "runbot.host" +class Host(models.Model): + _name = 'runbot.host' _description = "Host" _order = 'id' _inherit = 'mail.thread' name = fields.Char('Host name', required=True, unique=True) disp_name = fields.Char('Display name') - active = fields.Boolean('Active', default=True) + active = fields.Boolean('Active', default=True, tracking=True) last_start_loop = fields.Datetime('Last start') last_end_loop = fields.Datetime('Last end') last_success = fields.Datetime('Last success') - assigned_only = fields.Boolean('Only accept assigned build', default=False) - nb_worker = fields.Integer('Number of max paralel build', help="0 to use icp value", default=0) + assigned_only = fields.Boolean('Only accept assigned build', default=False, tracking=True) + nb_worker = fields.Integer( + 'Number of max paralel build', + default=lambda self: self.env['ir.config_parameter'].sudo().get_param('runbot.runbot_workers', default=2), + tracking=True + ) nb_testing = fields.Integer(compute='_compute_nb') nb_running = fields.Integer(compute='_compute_nb') last_exception = fields.Char('Last exception') @@ -43,20 +45,20 @@ class RunboHost(models.Model): @api.model_create_single def create(self, values): - if not 'disp_name' in values: + if 'disp_name' not in values: 
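+ # no display name given: fall back to the host name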
values['disp_name'] = values['name'] return super().create(values) def _bootstrap_db_template(self): """ boostrap template database if needed """ icp = self.env['ir.config_parameter'] - db_template = icp.get_param('runbot.runbot_db_template', default='template1') - if db_template and db_template != 'template1': + db_template = icp.get_param('runbot.runbot_db_template', default='template0') + if db_template and db_template != 'template0': with local_pgadmin_cursor() as local_cr: local_cr.execute("""SELECT datname FROM pg_catalog.pg_database WHERE datname = '%s';""" % db_template) res = local_cr.fetchone() if not res: - local_cr.execute("""CREATE DATABASE "%s" TEMPLATE template1 LC_COLLATE 'C' ENCODING 'unicode'""" % db_template) + local_cr.execute("""CREATE DATABASE "%s" TEMPLATE template0 LC_COLLATE 'C' ENCODING 'unicode'""" % db_template) # TODO UPDATE pg_database set datallowconn = false, datistemplate = true (but not enough privileges) def _bootstrap(self): @@ -78,17 +80,13 @@ class RunboHost(models.Model): return os.path.abspath(os.path.join(os.path.dirname(__file__), '../static')) @api.model - def _get_current(self): - name = fqdn() + def _get_current(self, suffix=''): + name = '%s%s' % (fqdn(), suffix) return self.search([('name', '=', name)]) or self.create({'name': name}) - def get_nb_worker(self): - icp = self.env['ir.config_parameter'] - return self.nb_worker or int(icp.sudo().get_param('runbot.runbot_workers', default=6)) - def get_running_max(self): icp = self.env['ir.config_parameter'] - return int(icp.get_param('runbot.runbot_running_max', default=75)) + return int(icp.get_param('runbot.runbot_running_max', default=5)) def set_psql_conn_count(self): _logger.debug('Updating psql connection count...') @@ -102,7 +100,7 @@ class RunboHost(models.Model): return sum(host.nb_testing for host in self) def _total_workers(self): - return sum(host.get_nb_worker() for host in self) + return sum(host.nb_worker for host in self) def disable(self): """ Reserve host if possible """ diff --git a/runbot/models/ir_cron.py b/runbot/models/ir_cron.py index 527afa26..04e59eb2 100644 --- a/runbot/models/ir_cron.py +++ b/runbot/models/ir_cron.py @@ -6,6 +6,7 @@ from odoo import models, fields odoo.service.server.SLEEP_INTERVAL = 5 odoo.addons.base.models.ir_cron._intervalTypes['seconds'] = lambda interval: relativedelta(seconds=interval) + class ir_cron(models.Model): _inherit = "ir.cron" diff --git a/runbot/models/ir_ui_view.py b/runbot/models/ir_ui_view.py new file mode 100644 index 00000000..08e4ba3e --- /dev/null +++ b/runbot/models/ir_ui_view.py @@ -0,0 +1,15 @@ +from ..common import s2human, s2human_long +from odoo import models +from odoo.http import request + + +class IrUiView(models.Model): + _inherit = ["ir.ui.view"] + + def _prepare_qcontext(self): + qcontext = super(IrUiView, self)._prepare_qcontext() + + if request and getattr(request, 'is_frontend', False): + qcontext['s2human'] = s2human + qcontext['s2human_long'] = s2human_long + return qcontext diff --git a/runbot/models/project.py b/runbot/models/project.py new file mode 100644 index 00000000..7ac9e99f --- /dev/null +++ b/runbot/models/project.py @@ -0,0 +1,20 @@ +from odoo import models, fields + + +class Project(models.Model): + _name = 'runbot.project' + _description = 'Project' + + name = fields.Char('Project name', required=True, unique=True) + group_ids = fields.Many2many('res.groups', string='Required groups') + + trigger_ids = fields.One2many('runbot.trigger', 'project_id', string='Triggers') + + +class 
Category(models.Model): + _name = 'runbot.category' + _description = 'Trigger category' + + name = fields.Char("Name") + icon = fields.Char("Font awesome icon") + view_id = fields.Many2one('ir.ui.view', "Link template") diff --git a/runbot/models/repo.py b/runbot/models/repo.py index 983c5605..33a7c945 100644 --- a/runbot/models/repo.py +++ b/runbot/models/repo.py @@ -1,90 +1,248 @@ # -*- coding: utf-8 -*- import datetime -import dateutil import json import logging -import random import re -import requests -import signal import subprocess import time -import glob -import shutil -from odoo.exceptions import UserError, ValidationError -from odoo.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT -from odoo import models, fields, api, registry -from odoo.modules.module import get_module_resource -from odoo.tools import config -from odoo.osv import expression -from ..common import fqdn, dt2time, Commit, dest_reg, os -from ..container import docker_ps, docker_stop -from psycopg2.extensions import TransactionRollbackError +import dateutil +import requests + +from pathlib import Path + +from odoo import models, fields, api +from ..common import os, RunbotException +from odoo.exceptions import UserError +from odoo.tools.safe_eval import safe_eval _logger = logging.getLogger(__name__) -class RunbotException(Exception): - pass -class runbot_repo(models.Model): +def _sanitize(name): + for i in '@:/': + name = name.replace(i, '_') + return name + + +class Trigger(models.Model): + """ + List of repo parts that must be part of the same bundle + """ + + _name = 'runbot.trigger' + _inherit = 'mail.thread' + _description = 'Triggers' - _name = "runbot.repo" - _description = "Repo" _order = 'sequence, id' - name = fields.Char('Repository', required=True) - short_name = fields.Char('Short name', compute='_compute_short_name', store=False, readonly=True) sequence = fields.Integer('Sequence') + name = fields.Char("Name") + description = fields.Char("Description", help="Informative description") + project_id = fields.Many2one('runbot.project', string="Project id", required=True) # main/security/runbot + repo_ids = fields.Many2many('runbot.repo', relation='runbot_trigger_triggers', string="Triggers", domain="[('project_id', '=', project_id)]") + dependency_ids = fields.Many2many('runbot.repo', relation='runbot_trigger_dependencies', string="Dependencies") + config_id = fields.Many2one('runbot.build.config', string="Config", required=True) + + ci_context = fields.Char("Ci context", default='ci/runbot', tracking=True) + category_id = fields.Many2one('runbot.category', default=lambda self: self.env.ref('runbot.default_category', raise_if_not_found=False)) + version_domain = fields.Char(string="Version domain") + hide = fields.Boolean('Hide batch on main page') # TODO adapt and fix (cla, ...) 
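+ # descriptive note: a 'manual' trigger is only started on demand, not when a batch is prepared automatically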
+ manual = fields.Boolean('Only start trigger manually', default=False) + + upgrade_dumps_trigger_id = fields.Many2one('runbot.trigger', string= 'Template/complement trigger', tracking=True) + upgrade_step_id = fields.Many2one('runbot.build.config.step', compute="_compute_upgrade_step_id", store=True) + ci_url = fields.Char("ci url") + ci_description = fields.Char("ci description") + + @api.depends('upgrade_dumps_trigger_id', 'config_id', 'config_id.step_order_ids.step_id.job_type') + def _compute_upgrade_step_id(self): + for trigger in self: + trigger.upgrade_step_id = False + if trigger.upgrade_dumps_trigger_id: + trigger.upgrade_step_id = self._upgrade_step_from_config(trigger.config_id) + + def _upgrade_step_from_config(self, config): + upgrade_step = next((step_order.step_id for step_order in config.step_order_ids if step_order.step_id._is_upgrade_step()), False) + if not upgrade_step: + raise UserError('Upgrade trigger should have a config with step of type Configure Upgrade') + return upgrade_step + + def _reference_builds(self, bundle): + self.ensure_one() + if self.upgrade_step_id: # this is an upgrade trigger, add corresponding builds + custom_config = next((trigger_custom.config_id for trigger_custom in bundle.trigger_custom_ids if trigger_custom.trigger_id == self), False) + step = self._upgrade_step_from_config(custom_config) if custom_config else self.upgrade_step_id + refs_builds = step._reference_builds(bundle, self) + return [(4, b.id) for b in refs_builds] + return [] + + def get_version_domain(self): + if self.version_domain: + return safe_eval(self.version_domain) + return [] + + +class Remote(models.Model): + """ + Regroups repo and it duplicates (forks): odoo+odoo-dev for each repo + """ + _name = 'runbot.remote' + _description = 'Remote' + _order = 'sequence, id' + _inherit = 'mail.thread' + + name = fields.Char('Url', required=True, tracking=True) + repo_id = fields.Many2one('runbot.repo', required=True, tracking=True) + + owner = fields.Char(compute='_compute_base_infos', string='Repo Owner', store=True, readonly=True, tracking=True) + repo_name = fields.Char(compute='_compute_base_infos', string='Repo Name', store=True, readonly=True, tracking=True) + repo_domain = fields.Char(compute='_compute_base_infos', string='Repo domain', store=True, readonly=True, tracking=True) + + base_url = fields.Char(compute='_compute_base_url', string='Base URL', readonly=True, tracking=True) + + short_name = fields.Char('Short name', compute='_compute_short_name', tracking=True) + remote_name = fields.Char('Remote name', compute='_compute_remote_name', tracking=True) + + sequence = fields.Integer('Sequence', tracking=True) + fetch_heads = fields.Boolean('Fetch branches', default=True, tracking=True) + fetch_pull = fields.Boolean('Fetch PR', default=False, tracking=True) + + token = fields.Char("Github token", groups="runbot.group_runbot_admin") + + @api.depends('name') + def _compute_base_infos(self): + for remote in self: + name = re.sub('.+@', '', remote.name) + name = re.sub('^https://', '', name) # support https repo style + name = re.sub('.git$', '', name) + name = name.replace(':', '/') + s = name.split('/') + remote.repo_domain = s[-3] + remote.owner = s[-2] + remote.repo_name = s[-1] + + @api.depends('repo_domain', 'owner', 'repo_name') + def _compute_base_url(self): + for remote in self: + remote.base_url = '%s/%s/%s' % (remote.repo_domain, remote.owner, remote.repo_name) + + @api.depends('name', 'base_url') + def _compute_short_name(self): + for remote in self: + 
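# short name keeps only the last two url segments, i.e. owner/repo_name +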
remote.short_name = '/'.join(remote.base_url.split('/')[-2:]) + + def _compute_remote_name(self): + for remote in self: + remote.remote_name = _sanitize(remote.short_name) + + def create(self, values_list): + remote = super().create(values_list) + if not remote.repo_id.main_remote_id: + remote.repo_id.main_remote_id = remote + remote._cr.after('commit', remote.repo_id._update_git_config) + return remote + + def write(self, values): + res = super().write(values) + self._cr.after('commit', self.repo_id._update_git_config) + return res + + def _github(self, url, payload=None, ignore_errors=False, nb_tries=2, recursive=False): + generator = self.sudo()._github_generator(url, payload=payload, ignore_errors=ignore_errors, nb_tries=nb_tries, recursive=recursive) + if recursive: + return generator + result = list(generator) + return result[0] if result else False + + def _github_generator(self, url, payload=None, ignore_errors=False, nb_tries=2, recursive=False): + """Return a http request to be sent to github""" + for remote in self: + if remote.owner and remote.repo_name and remote.repo_domain: + url = url.replace(':owner', remote.owner) + url = url.replace(':repo', remote.repo_name) + url = 'https://api.%s%s' % (remote.repo_domain, url) + session = requests.Session() + if remote.token: + session.auth = (remote.token, 'x-oauth-basic') + session.headers.update({'Accept': 'application/vnd.github.she-hulk-preview+json'}) + while url: + if recursive: + _logger.info('Getting page %s', url) + try_count = 0 + while try_count < nb_tries: + try: + if payload: + response = session.post(url, data=json.dumps(payload)) + else: + response = session.get(url) + response.raise_for_status() + if try_count > 0: + _logger.info('Success after %s tries', (try_count + 1)) + if recursive: + link = response.headers.get('link') + url = False + if link: + url = {link.split(';')[1]: link.split(';')[0] for link in link.split(',')}.get(' rel="next"') + if url: + url = url.strip('<> ') + yield response.json() + break + else: + yield response.json() + return + except requests.HTTPError: + try_count += 1 + if try_count < nb_tries: + time.sleep(2) + else: + if ignore_errors: + _logger.exception('Ignored github error %s %r (try %s/%s)', url, payload, try_count, nb_tries) + url = False + else: + raise + + +class Repo(models.Model): + + _name = 'runbot.repo' + _description = "Repo" + _order = 'sequence, id' + _inherit = 'mail.thread' + + name = fields.Char("Name", unique=True, tracking=True) # odoo/enterprise/upgrade/security/runbot/design_theme + identity_file = fields.Char("Identity File", help="Identity file to use with git/ssh", groups="runbot.group_runbot_admin") + main_remote_id = fields.Many2one('runbot.remote', "Main remote", tracking=True) + remote_ids = fields.One2many('runbot.remote', 'repo_id', "Remotes") + project_id = fields.Many2one('runbot.project', required=True, tracking=True, + help="Default bundle project to use when pushing on this repos", + default=lambda self: self.env.ref('runbot.main_project', raise_if_not_found=False)) + # -> not verry usefull, remove it? (iterate on projects or contraints triggers: + # all trigger where a repo is used must be in the same project. 
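+ # source layout of the repo: modules to install and test, candidate server files, manifest file names and addons paths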
+ modules = fields.Char("Modules to install", help="Comma-separated list of modules to install and test.", tracking=True) + server_files = fields.Char('Server files', help='Comma separated list of possible server files', tracking=True) # odoo-bin,openerp-server,openerp-server.py + manifest_files = fields.Char('Manifest files', help='Comma separated list of possible manifest files', default='__manifest__.py', tracking=True) + addons_paths = fields.Char('Addons paths', help='Comma separated list of possible addons path', default='', tracking=True) + + sequence = fields.Integer('Sequence', tracking=True) path = fields.Char(compute='_get_path', string='Directory', readonly=True) - base = fields.Char(compute='_get_base_url', string='Base URL', readonly=True) # Could be renamed to a more explicit name like base_url - nginx = fields.Boolean('Nginx') mode = fields.Selection([('disabled', 'Disabled'), ('poll', 'Poll'), ('hook', 'Hook')], default='poll', - string="Mode", required=True, help="hook: Wait for webhook on /runbot/hook/ i.e. github push event") + string="Mode", required=True, help="hook: Wait for webhook on /runbot/hook/ i.e. github push event", tracking=True) hook_time = fields.Float('Last hook time', compute='_compute_hook_time') + last_processed_hook_time = fields.Float('Last processed hook time') get_ref_time = fields.Float('Last refs db update', compute='_compute_get_ref_time') - duplicate_id = fields.Many2one('runbot.repo', 'Duplicate repo', help='Repository for finding duplicate builds') - modules = fields.Char("Modules to install", help="Comma-separated list of modules to install and test.") - modules_auto = fields.Selection([('none', 'None (only explicit modules list)'), - ('repo', 'Repository modules (excluding dependencies)'), - ('all', 'All modules (including dependencies)')], - default='all', - string="Other modules to install automatically") - - dependency_ids = fields.Many2many( - 'runbot.repo', 'runbot_repo_dep_rel', column1='dependant_id', column2='dependency_id', - string='Extra dependencies', - help="Community addon repos which need to be present to run tests.") - token = fields.Char("Github token", groups="runbot.group_runbot_admin") - group_ids = fields.Many2many('res.groups', string='Limited to groups') - - repo_config_id = fields.Many2one('runbot.build.config', 'Repo Config') - config_id = fields.Many2one('runbot.build.config', 'Run Config', compute='_compute_config_id', inverse='_inverse_config_id') - - server_files = fields.Char('Server files', help='Comma separated list of possible server files') # odoo-bin,openerp-server,openerp-server.py - manifest_files = fields.Char('Manifest files', help='Comma separated list of possible manifest files', default='__manifest__.py') - addons_paths = fields.Char('Addons paths', help='Comma separated list of possible addons path', default='') - no_build = fields.Boolean("No build", help="Forbid creation of build on this repo", default=False) - - def _compute_config_id(self): - for repo in self: - if repo.repo_config_id: - repo.config_id = repo.repo_config_id - else: - repo.config_id = self.env.ref('runbot.runbot_build_config_default') - - def _inverse_config_id(self): - for repo in self: - repo.repo_config_id = repo.config_id + trigger_ids = fields.Many2many('runbot.trigger', relation='runbot_trigger_triggers', readonly=True) + forbidden_regex = fields.Char('Forbidden regex', help="Regex that forid bundle creation if branch name is matching", tracking=True) + invalid_branch_message = fields.Char('Forbidden branch message', 
tracking=True) def _compute_get_ref_time(self): self.env.cr.execute(""" SELECT repo_id, time FROM runbot_repo_reftime WHERE id IN ( - SELECT max(id) FROM runbot_repo_reftime + SELECT max(id) FROM runbot_repo_reftime WHERE repo_id = any(%s) GROUP BY repo_id ) """, [self.ids]) @@ -96,7 +254,7 @@ class runbot_repo(models.Model): self.env.cr.execute(""" SELECT repo_id, time FROM runbot_repo_hooktime WHERE id IN ( - SELECT max(id) FROM runbot_repo_hooktime + SELECT max(id) FROM runbot_repo_hooktime WHERE repo_id = any(%s) GROUP BY repo_id ) """, [self.ids]) @@ -127,102 +285,36 @@ class runbot_repo(models.Model): ) """) - def _root(self): - """Return root directory of repository""" - default = os.path.join(os.path.dirname(__file__), '../static') - return os.path.abspath(default) - - def _source_path(self, sha, *path): - """ - returns the absolute path to the source folder of the repo (adding option *path) - """ - self.ensure_one() - return os.path.join(self._root(), 'sources', self._get_repo_name_part(), sha, *path) - @api.depends('name') def _get_path(self): """compute the server path of repo from the name""" - root = self._root() + root = self.env['runbot.runbot']._root() for repo in self: - repo.path = os.path.join(root, 'repo', repo._sanitized_name(repo.name)) - - @api.model - def _sanitized_name(self, name): - for i in '@:/': - name = name.replace(i, '_') - return name - - @api.depends('name') - def _get_base_url(self): - for repo in self: - name = re.sub('.+@', '', repo.name) - name = re.sub('^https://', '', name) # support https repo style - name = re.sub('.git$', '', name) - name = name.replace(':', '/') - repo.base = name - - @api.depends('name', 'base') - def _compute_short_name(self): - for repo in self: - repo.short_name = '/'.join(repo.base.split('/')[-2:]) - - def _get_repo_name_part(self): - self.ensure_one() - return self._sanitized_name(self.name.split('/')[-1]) + repo.path = os.path.join(root, 'repo', _sanitize(repo.name)) def _git(self, cmd): """Execute a git command 'cmd'""" self.ensure_one() - _logger.debug("git command: git (dir %s) %s", self.short_name, ' '.join(cmd)) - cmd = ['git', '--git-dir=%s' % self.path] + cmd + config_args = [] + if self.identity_file: + config_args = ['-c', 'core.sshCommand=ssh -i %s/.ssh/%s' % (str(Path.home()), self.identity_file)] + cmd = ['git', '-C', self.path] + config_args + cmd + _logger.info("git command: %s", ' '.join(cmd)) return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode() - def _git_rev_parse(self, branch_name): - return self._git(['rev-parse', branch_name]).strip() - - def _git_export(self, sha): - """Export a git repo into a sources""" - # TODO add automated tests - self.ensure_one() - export_path = self._source_path(sha) - - if os.path.isdir(export_path): - _logger.info('git export: checkouting to %s (already exists)' % export_path) - return export_path - + def _fetch(self, sha): if not self._hash_exists(sha): self._update(force=True) if not self._hash_exists(sha): - try: - result = self._git(['fetch', 'origin', sha]) - except: - pass + for remote in self.remote_ids: + try: + self._git(['fetch', remote.remote_name, sha]) + _logger.info('Success fetching specific head %s on %s', sha, remote) + break + except subprocess.CalledProcessError: + pass if not self._hash_exists(sha): - raise RunbotException("Commit %s is unreachable. Did you force push the branch since build creation?" 
% sha) - - _logger.info('git export: checkouting to %s (new)' % export_path) - os.makedirs(export_path) - - p1 = subprocess.Popen(['git', '--git-dir=%s' % self.path, 'archive', sha], stdout=subprocess.PIPE) - p2 = subprocess.Popen(['tar', '-xmC', export_path], stdin=p1.stdout, stdout=subprocess.PIPE) - p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits. - (out, err) = p2.communicate() - if err: - raise RunbotException("Archive %s failed. Did you force push the branch since build creation? (%s)" % (sha, err)) - - # migration scripts link if necessary - icp = self.env['ir.config_parameter'] - ln_param = icp.get_param('runbot_migration_ln', default='') - migration_repo_id = int(icp.get_param('runbot_migration_repo_id', default=0)) - if ln_param and migration_repo_id and self.server_files: - scripts_dir = self.env['runbot.repo'].browse(migration_repo_id)._get_repo_name_part() - try: - os.symlink('/data/build/%s' % scripts_dir, self._source_path(sha, ln_param)) - except FileNotFoundError: - _logger.warning('Impossible to create migration symlink') - - # TODO get result and fallback on cleaing in case of problem - return export_path + raise RunbotException("Commit %s is unreachable. Did you force push the branch?" % sha) def _hash_exists(self, commit_hash): """ Verify that a commit hash exists in the repo """ @@ -233,47 +325,20 @@ class runbot_repo(models.Model): return False return True - def _github(self, url, payload=None, ignore_errors=False, nb_tries=2): - """Return a http request to be sent to github""" - for repo in self: - if not repo.token: - return - match_object = re.search('([^/]+)/([^/]+)/([^/.]+(.git)?)', repo.base) - if match_object: - url = url.replace(':owner', match_object.group(2)) - url = url.replace(':repo', match_object.group(3)) - url = 'https://api.%s%s' % (match_object.group(1), url) - session = requests.Session() - session.auth = (repo.token, 'x-oauth-basic') - session.headers.update({'Accept': 'application/vnd.github.she-hulk-preview+json'}) - try_count = 0 - while try_count < nb_tries: - try: - if payload: - response = session.post(url, data=json.dumps(payload)) - else: - response = session.get(url) - response.raise_for_status() - if try_count > 0: - _logger.info('Success after %s tries' % (try_count + 1)) - return response.json() - except Exception as e: - try_count += 1 - if try_count < nb_tries: - time.sleep(2) - else: - if ignore_errors: - _logger.exception('Ignored github error %s %r (try %s/%s)' % (url, payload, try_count + 1, nb_tries)) - else: - raise + def _is_branch_forbidden(self, branch_name): + self.ensure_one() + if self.forbidden_regex: + return re.match(self.forbidden_regex, branch_name) + return False def _get_fetch_head_time(self): self.ensure_one() fname_fetch_head = os.path.join(self.path, 'FETCH_HEAD') if os.path.exists(fname_fetch_head): return os.path.getmtime(fname_fetch_head) + return 0 - def _get_refs(self): + def _get_refs(self, max_age=30): """Find new refs :return: list of tuples with following refs informations: name, sha, date, author, author_email, subject, committer, committer_email @@ -282,14 +347,24 @@ class runbot_repo(models.Model): get_ref_time = round(self._get_fetch_head_time(), 4) if not self.get_ref_time or get_ref_time > self.get_ref_time: - self.set_ref_time(get_ref_time) - fields = ['refname', 'objectname', 'committerdate:iso8601', 'authorname', 'authoremail', 'subject', 'committername', 'committeremail'] - fmt = "%00".join(["%(" + field + ")" for field in fields]) - git_refs = self._git(['for-each-ref', 
'--format', fmt, '--sort=-committerdate', 'refs/heads', 'refs/pull']) - git_refs = git_refs.strip() - return [tuple(field for field in line.split('\x00')) for line in git_refs.split('\n')] - else: - return [] + try: + self.set_ref_time(get_ref_time) + fields = ['refname', 'objectname', 'committerdate:iso8601', 'authorname', 'authoremail', 'subject', 'committername', 'committeremail'] + fmt = "%00".join(["%(" + field + ")" for field in fields]) + cmd = ['for-each-ref', '--format', fmt, '--sort=-committerdate', 'refs/*/heads/*'] + if any(remote.fetch_pull for remote in self.remote_ids): + cmd.append('refs/*/pull/*') + git_refs = self._git(cmd) + git_refs = git_refs.strip() + if not git_refs: + return [] + refs = [tuple(field for field in line.split('\x00')) for line in git_refs.split('\n')] + refs = [r for r in refs if dateutil.parser.parse(r[2][:19]) + datetime.timedelta(days=max_age) > datetime.datetime.now()] + return refs + except Exception: + _logger.exception('Fail to get refs for repo %s', self.name) + self.env['runbot.runbot'].warning('Fail to get refs for repo %s', self.name) + return [] def _find_or_create_branches(self, refs): """Parse refs and create branches that does not exists yet @@ -298,20 +373,31 @@ class runbot_repo(models.Model): The returned structure contains all the branches from refs newly created or older ones. """ - Branch = self.env['runbot.branch'] - self.env.cr.execute(""" - WITH t (branch) AS (SELECT unnest(%s)) - SELECT t.branch, b.id - FROM t LEFT JOIN runbot_branch b ON (b.name = t.branch) - WHERE b.repo_id = %s; - """, ([r[0] for r in refs], self.id)) - ref_branches = {r[0]: r[1] for r in self.env.cr.fetchall()} - for name, sha, date, author, author_email, subject, committer, committer_email in refs: - if not ref_branches.get(name): - _logger.debug('repo %s found new branch %s', self.name, name) - new_branch = Branch.create({'repo_id': self.id, 'name': name}) - ref_branches[name] = new_branch.id + # FIXME WIP + names = [r[0].split('/')[-1] for r in refs] + branches = self.env['runbot.branch'].search([('name', 'in', names), ('remote_id', 'in', self.remote_ids.ids)]) + ref_branches = {branch.ref(): branch for branch in branches} + new_branch_values = [] + for ref_name, sha, date, author, author_email, subject, committer, committer_email in refs: + if not ref_branches.get(ref_name): + # format example: + # refs/ruodoo-dev/heads/12.0-must-fail + # refs/ruodoo/pull/1 + _, remote_name, branch_type, name = ref_name.split('/') + remote_id = self.remote_ids.filtered(lambda r: r.remote_name == remote_name).id + if not remote_id: + _logger.warning('Remote %s not found', remote_name) + continue + new_branch_values.append({'remote_id': remote_id, 'name': name, 'is_pr': branch_type == 'pull'}) + # TODO catch error for pr info. It may fail for multiple raison. closed? external? 
check corner cases + _logger.info('new branch %s found in %s', name, self.name) + if new_branch_values: + _logger.info('Creating new branches') + # TODO ASAP dont fail all if pr status fail, aka github is dans les choux + new_branches = self.env['runbot.branch'].create(new_branch_values) + for branch in new_branches: + ref_branches[branch.ref()] = branch return ref_branches def _find_new_commits(self, refs, ref_branches): @@ -321,469 +407,145 @@ class runbot_repo(models.Model): described in _find_or_create_branches """ self.ensure_one() - Branch = self.env['runbot.branch'] - Build = self.env['runbot.build'] - icp = self.env['ir.config_parameter'] - max_age = int(icp.get_param('runbot.runbot_max_age', default=30)) - self.env.cr.execute(""" - SELECT DISTINCT ON (branch_id) name, branch_id - FROM runbot_build WHERE branch_id in %s AND build_type = 'normal' AND parent_id is null ORDER BY branch_id,id DESC; - """, (tuple([ref_branches[r[0]] for r in refs]),)) - # generate a set of tuples (branch_id, sha) - builds_candidates = {(r[1], r[0]) for r in self.env.cr.fetchall()} + for ref_name, sha, date, author, author_email, subject, committer, committer_email in refs: + branch = ref_branches[ref_name] + if branch.head_name != sha: # new push on branch + _logger.info('repo %s branch %s new commit found: %s', self.name, branch.name, sha) - for name, sha, date, author, author_email, subject, committer, committer_email in refs: - branch = Branch.browse(ref_branches[name]) + commit = self.env['runbot.commit']._get(sha, self.id, { + 'author': author, + 'author_email': author_email, + 'committer': committer, + 'committer_email': committer_email, + 'subject': subject, + 'date': dateutil.parser.parse(date[:19]), + }) + branch.head = commit + branch.alive = True + # Not perfect, il some case a pr can be closed but still visible in repo. + # The head wont change but on creation the branch will be set alive even if git into said pr is closed + # It is still better to have false open than false closed - # skip the build for old branches (Could be checked before creating the branch in DB ?) - if dateutil.parser.parse(date[:19]) + datetime.timedelta(days=max_age) < datetime.datetime.now(): - continue + if branch.reference_name and branch.remote_id and branch.remote_id.repo_id._is_branch_forbidden(branch.reference_name ): + message = "This branch name is incorrect. 
Branch name should be prefixed with a valid version" + message = branch.remote_id.repo_id.invalid_branch_message or message + branch.head._github_status(False, "Branch naming", 'failure', False, message) - # create build (and mark previous builds as skipped) if not found - if not (branch.id, sha) in builds_candidates: - if branch.no_auto_build or branch.no_build or (branch.repo_id.no_build and not branch.rebuild_requested): + if not self.trigger_ids: continue - if branch.rebuild_requested: - branch.rebuild_requested = False - _logger.debug('repo %s branch %s new build found revno %s', self.name, branch.name, sha) - build_info = { - 'branch_id': branch.id, - 'name': sha, - 'author': author, - 'author_email': author_email, - 'committer': committer, - 'committer_email': committer_email, - 'subject': subject, - 'date': dateutil.parser.parse(date[:19]), - 'build_type': 'normal', - } - if not branch.sticky: - # pending builds are skipped as we have a new ref - builds_to_skip = Build.search( - [('branch_id', '=', branch.id), ('local_state', '=', 'pending')], - order='sequence asc') - builds_to_skip._skip(reason='New ref found') - if builds_to_skip: - build_info['sequence'] = builds_to_skip[0].sequence - Build.create(build_info) + bundle = branch.bundle_id + if bundle.no_build: + continue - def _create_pending_builds(self): + if bundle.last_batch.state != 'preparing' and commit not in bundle.last_batch.commit_ids: + preparing = self.env['runbot.batch'].create({ + 'last_update': fields.Datetime.now(), + 'bundle_id': bundle.id, + 'state': 'preparing', + }) + bundle.last_batch = preparing + + if bundle.last_batch.state == 'preparing': + bundle.last_batch._new_commit(branch) + + def _update_batches(self, force=False): """ Find new commits in physical repos""" - refs = {} - ref_branches = {} + updated = False for repo in self: - try: - ref = repo._get_refs() + if repo.remote_ids and self._update(poll_delay=30 if force else 60*5): max_age = int(self.env['ir.config_parameter'].get_param('runbot.runbot_max_age', default=30)) - good_refs = [r for r in ref if dateutil.parser.parse(r[2][:19]) + datetime.timedelta(days=max_age) > datetime.datetime.now()] - if good_refs: - refs[repo] = good_refs - except Exception: - _logger.exception('Fail to get refs for repo %s', repo.name) - if repo in refs: - ref_branches[repo] = repo._find_or_create_branches(refs[repo]) + ref = repo._get_refs(max_age) + ref_branches = repo._find_or_create_branches(ref) + repo._find_new_commits(ref, ref_branches) + updated = True + return updated - # keep _find_or_create_branches separated from build creation to ease - # closest branch detection + def _update_git_config(self): + """ Update repo git config file """ for repo in self: - if repo in refs: - repo._find_new_commits(refs[repo], ref_branches[repo]) + if os.path.isdir(os.path.join(repo.path, 'refs')): + git_config_path = os.path.join(repo.path, 'config') + template_params = {'repo': repo} + git_config = self.env['ir.ui.view'].render_template("runbot.git_config", template_params) + with open(git_config_path, 'wb') as config_file: + config_file.write(git_config) + _logger.info('Config updated for repo %s' % repo.name) + else: + _logger.info('Repo not cloned, skiping config update for %s' % repo.name) - def _clone(self): + def _git_init(self): """ Clone the remote repo if needed """ self.ensure_one() repo = self if not os.path.isdir(os.path.join(repo.path, 'refs')): - _logger.info("Cloning repository '%s' in '%s'" % (repo.name, repo.path)) - subprocess.call(['git', 'clone', '--bare', 
repo.name, repo.path]) + _logger.info("Initiating repository '%s' in '%s'" % (repo.name, repo.path)) + git_init = subprocess.run(['git', 'init', '--bare', repo.path], stderr=subprocess.PIPE) + if git_init.returncode: + _logger.warning('Git init failed with code %s and message: "%s"', git_init.returncode, git_init.stderr) + return + self._update_git_config() + return True - def _update_git(self, force): + def _update_git(self, force=False, poll_delay=5*60): """ Update the git repo on FS """ self.ensure_one() repo = self - _logger.debug('repo %s updating branches', repo.name) - + if not repo.remote_ids: + return False if not os.path.isdir(os.path.join(repo.path)): os.makedirs(repo.path) - self._clone() + force = self._git_init() or force - # check for mode == hook fname_fetch_head = os.path.join(repo.path, 'FETCH_HEAD') if not force and os.path.isfile(fname_fetch_head): fetch_time = os.path.getmtime(fname_fetch_head) - if repo.mode == 'hook' and (not repo.hook_time or repo.hook_time < fetch_time): - t0 = time.time() - _logger.debug('repo %s skip hook fetch fetch_time: %ss ago hook_time: %ss ago', - repo.name, int(t0 - fetch_time), int(t0 - repo.hook_time) if repo.hook_time else 'never') - return + if repo.mode == 'hook': + if not repo.hook_time or (repo.last_processed_hook_time and repo.hook_time <= repo.last_processed_hook_time): + return False + repo.last_processed_hook_time = repo.hook_time + if repo.mode == 'poll': + if (time.time() < fetch_time + poll_delay): + return False - self._update_fetch_cmd() + _logger.info('Updating repo %s', repo.name) + return self._update_fetch_cmd() def _update_fetch_cmd(self): # Extracted from update_git to be easily overriden in external module self.ensure_one() - repo = self try_count = 0 - failure = True + success = False delay = 0 - - while failure and try_count < 5: + while not success and try_count < 5: time.sleep(delay) try: - repo._git(['fetch', '-p', 'origin', '+refs/heads/*:refs/heads/*', '+refs/pull/*/head:refs/pull/*']) - failure = False + self._git(['fetch', '-p', '--all', ]) + success = True except subprocess.CalledProcessError as e: try_count += 1 delay = delay * 1.5 if delay else 0.5 if try_count > 4: - message = 'Failed to fetch repo %s: %s' % (repo.name, e.output.decode()) - _logger.exception(message) + message = 'Failed to fetch repo %s: %s' % (self.name, e.output.decode()) host = self.env['runbot.host']._get_current() + host.message_post(body='message') + self.env['runbot.runbot'].warning('Host %s got reserved because of fetch failure' % host.name) + _logger.exception(message) host.disable() + return success - def _update(self, force=True): + def _update(self, force=False, poll_delay=5*60): """ Update the physical git reposotories on FS""" - for repo in reversed(self): + for repo in self: try: - repo._update_git(force) # TODO xdo, check gc log and log warning + return repo._update_git(force, poll_delay) except Exception: _logger.exception('Fail to update repo %s', repo.name) - def _commit(self): - self.env.cr.commit() - self.env.cache.invalidate() - self.env.clear() - - def _scheduler(self, host): - nb_workers = host.get_nb_worker() - - self._gc_testing(host) - self._commit() - for build in self._get_builds_with_requested_actions(host): - build._process_requested_actions() - self._commit() - for build in self._get_builds_to_schedule(host): - build._schedule() - self._commit() - self._assign_pending_builds(host, nb_workers, [('build_type', '!=', 'scheduled')]) - self._commit() - self._assign_pending_builds(host, nb_workers-1 or 
nb_workers) - self._commit() - for build in self._get_builds_to_init(host): - build._init_pendings(host) - self._commit() - self._gc_running(host) - self._commit() - self._reload_nginx() - - def build_domain_host(self, host, domain=None): - domain = domain or [] - return [('repo_id', 'in', self.ids), ('host', '=', host.name)] + domain - - def _get_builds_with_requested_actions(self, host): - return self.env['runbot.build'].search(self.build_domain_host(host, [('requested_action', 'in', ['wake_up', 'deathrow'])])) - - def _get_builds_to_schedule(self, host): - return self.env['runbot.build'].search(self.build_domain_host(host, [('local_state', 'in', ['testing', 'running'])])) - - def _assign_pending_builds(self, host, nb_workers, domain=None): - if not self.ids or host.assigned_only or nb_workers <= 0: - return - domain_host = self.build_domain_host(host) - reserved_slots = self.env['runbot.build'].search_count(domain_host + [('local_state', 'in', ('testing', 'pending'))]) - assignable_slots = (nb_workers - reserved_slots) - if assignable_slots > 0: - allocated = self._allocate_builds(host, assignable_slots, domain) - if allocated: - _logger.debug('Builds %s where allocated to runbot' % allocated) - - def _get_builds_to_init(self, host): - domain_host = self.build_domain_host(host) - used_slots = self.env['runbot.build'].search_count(domain_host + [('local_state', '=', 'testing')]) - available_slots = host.get_nb_worker() - used_slots - if available_slots <= 0: - return self.env['runbot.build'] - return self.env['runbot.build'].search(domain_host + [('local_state', '=', 'pending')], limit=available_slots) - - def _gc_running(self, host): - running_max = host.get_running_max() - # terminate and reap doomed build - domain_host = self.build_domain_host(host) - Build = self.env['runbot.build'] - # some builds are marked as keep running - cannot_be_killed_ids = Build.search(domain_host + [('keep_running', '!=', True)]).ids - # we want to keep one build running per sticky, no mather which host - sticky_branches_ids = self.env['runbot.branch'].search([('sticky', '=', True)]).ids - # search builds on host on sticky branches, order by position in branch history - if sticky_branches_ids: - self.env.cr.execute(""" - SELECT - id - FROM ( - SELECT - bu.id AS id, - bu.host as host, - row_number() OVER (PARTITION BY branch_id order by bu.id desc) AS row - FROM - runbot_branch br INNER JOIN runbot_build bu ON br.id=bu.branch_id - WHERE - br.id in %s AND (bu.hidden = 'f' OR bu.hidden IS NULL) - ) AS br_bu - WHERE - row <= 4 AND host = %s - ORDER BY row, id desc - """, [tuple(sticky_branches_ids), host.name] - ) - cannot_be_killed_ids += self.env.cr.fetchall() - cannot_be_killed_ids = cannot_be_killed_ids[:running_max] # ensure that we don't try to keep more than we can handle - - build_ids = Build.search(domain_host + [('local_state', '=', 'running'), ('id', 'not in', cannot_be_killed_ids)], order='job_start desc').ids - Build.browse(build_ids)[running_max:]._kill() - - def _gc_testing(self, host): - """garbage collect builds that could be killed""" - # decide if we need room - Build = self.env['runbot.build'] - domain_host = self.build_domain_host(host) - testing_builds = Build.search(domain_host + [('local_state', 'in', ['testing', 'pending']), ('requested_action', '!=', 'deathrow')]) - used_slots = len(testing_builds) - available_slots = host.get_nb_worker() - used_slots - nb_pending = Build.search_count([('local_state', '=', 'pending'), ('host', '=', False)]) - if available_slots > 0 or nb_pending == 0: 
- return - for build in testing_builds: - top_parent = build._get_top_parent() - if not build.branch_id.sticky: - newer_candidates = Build.search([ - ('id', '>', build.id), - ('branch_id', '=', build.branch_id.id), - ('build_type', '=', 'normal'), - ('parent_id', '=', False), - ('hidden', '=', False), - ('config_id', '=', top_parent.config_id.id) - ]) - if newer_candidates: - top_parent._ask_kill(message='Build automatically killed, newer build found %s.' % newer_candidates.ids) - - def _allocate_builds(self, host, nb_slots, domain=None): - if nb_slots <= 0: - return [] - non_allocated_domain = [('repo_id', 'in', self.ids), ('local_state', '=', 'pending'), ('host', '=', False)] - if domain: - non_allocated_domain = expression.AND([non_allocated_domain, domain]) - e = expression.expression(non_allocated_domain, self.env['runbot.build']) - assert e.get_tables() == ['"runbot_build"'] - where_clause, where_params = e.to_sql() - - # self-assign to be sure that another runbot instance cannot self assign the same builds - query = """UPDATE - runbot_build - SET - host = %%s - WHERE - runbot_build.id IN ( - SELECT runbot_build.id - FROM runbot_build - LEFT JOIN runbot_branch - ON runbot_branch.id = runbot_build.branch_id - WHERE - %s - ORDER BY - array_position(array['normal','rebuild','indirect','scheduled']::varchar[], runbot_build.build_type) ASC, - runbot_branch.sticky DESC, - runbot_branch.priority DESC, - runbot_build.sequence ASC - FOR UPDATE OF runbot_build SKIP LOCKED - LIMIT %%s - ) - RETURNING id""" % where_clause - self.env.cr.execute(query, [host.name] + where_params + [nb_slots]) - return self.env.cr.fetchall() - - def _domain(self): - return self.env.get('ir.config_parameter').get_param('runbot.runbot_domain', fqdn()) - - def _reload_nginx(self): - settings = {} - settings['port'] = config.get('http_port') - settings['runbot_static'] = os.path.join(get_module_resource('runbot', 'static'), '') - nginx_dir = os.path.join(self._root(), 'nginx') - settings['nginx_dir'] = nginx_dir - settings['re_escape'] = re.escape - settings['fqdn'] = fqdn() - nginx_repos = self.search([('nginx', '=', True)], order='id') - if nginx_repos: - settings['builds'] = self.env['runbot.build'].search([('repo_id', 'in', nginx_repos.ids), ('local_state', '=', 'running'), ('host', '=', fqdn())]) - - nginx_config = self.env['ir.ui.view'].render_template("runbot.nginx_config", settings) - os.makedirs(nginx_dir, exist_ok=True) - content = None - nginx_conf_path = os.path.join(nginx_dir, 'nginx.conf') - content = '' - if os.path.isfile(nginx_conf_path): - with open(nginx_conf_path, 'rb') as f: - content = f.read() - if content != nginx_config: - _logger.debug('reload nginx') - with open(nginx_conf_path, 'wb') as f: - f.write(nginx_config) - try: - pid = int(open(os.path.join(nginx_dir, 'nginx.pid')).read().strip(' \n')) - os.kill(pid, signal.SIGHUP) - except Exception: - _logger.debug('start nginx') - if subprocess.call(['/usr/sbin/nginx', '-p', nginx_dir, '-c', 'nginx.conf']): - # obscure nginx bug leaving orphan worker listening on nginx port - if not subprocess.call(['pkill', '-f', '-P1', 'nginx: worker']): - _logger.debug('failed to start nginx - orphan worker killed, retrying') - subprocess.call(['/usr/sbin/nginx', '-p', nginx_dir, '-c', 'nginx.conf']) - else: - _logger.debug('failed to start nginx - failed to kill orphan worker - oh well') - - def _get_cron_period(self, min_margin=120): - """ Compute a randomized cron period with a 2 min margin below - real cron timeout from config. 
- """ - cron_limit = config.get('limit_time_real_cron') - req_limit = config.get('limit_time_real') - cron_timeout = cron_limit if cron_limit > -1 else req_limit - return cron_timeout - (min_margin + random.randint(1, 60)) - - def _cron_fetch_and_schedule(self, hostname): - """This method have to be called from a dedicated cron on a runbot - in charge of orchestration. - """ - - if hostname != fqdn(): - return 'Not for me' - - start_time = time.time() - timeout = self._get_cron_period() - icp = self.env['ir.config_parameter'] - update_frequency = int(icp.get_param('runbot.runbot_update_frequency', default=10)) - while time.time() - start_time < timeout: - repos = self.search([('mode', '!=', 'disabled')]) - repos._update(force=False) - repos._create_pending_builds() - self._commit() - time.sleep(update_frequency) - - def _cron_fetch_and_build(self, hostname): - """ This method have to be called from a dedicated cron - created on each runbot instance. - """ - - if hostname != fqdn(): - return 'Not for me' - - host = self.env['runbot.host']._get_current() - host.set_psql_conn_count() - host._bootstrap() - host.last_start_loop = fields.Datetime.now() - - self._commit() - start_time = time.time() - # 1. source cleanup - # -> Remove sources when no build is using them - # (could be usefull to keep them for wakeup but we can checkout them again if not forced push) - self.env['runbot.repo']._source_cleanup() - # 2. db and log cleanup - # -> Keep them as long as possible - self.env['runbot.build']._local_cleanup() - # 3. docker cleanup - self.env['runbot.repo']._docker_cleanup() - host._docker_build() - - timeout = self._get_cron_period() - icp = self.env['ir.config_parameter'] - update_frequency = int(icp.get_param('runbot.runbot_update_frequency', default=10)) - while time.time() - start_time < timeout: - time.sleep(self._scheduler_loop_turn(host, update_frequency)) - - host.last_end_loop = fields.Datetime.now() - - def _scheduler_loop_turn(self, host, default_sleep=1): - repos = self.search([('mode', '!=', 'disabled')]) - try: - repos._scheduler(host) - host.last_success = fields.Datetime.now() - self._commit() - except Exception as e: - self.env.cr.rollback() - self.env.clear() - _logger.exception(e) - message = str(e) - if host.last_exception == message: - host.exception_count += 1 - else: - host.last_exception = str(e) - host.exception_count = 1 - self._commit() - return random.uniform(0, 3) - else: - if host.last_exception: - host.last_exception = "" - host.exception_count = 0 - return default_sleep - - def _source_cleanup(self): - try: - if self.pool._init: - return - _logger.info('Source cleaning') - # we can remove a source only if no build are using them as name or rependency_ids aka as commit - cannot_be_deleted_builds = self.env['runbot.build'].search([('host', '=', fqdn()), ('local_state', 'not in', ('done', 'duplicate'))]) - cannot_be_deleted_path = set() - for build in cannot_be_deleted_builds: - for commit in build._get_all_commit(): - cannot_be_deleted_path.add(commit._source_path()) - - to_delete = set() - to_keep = set() - repos = self.search([('mode', '!=', 'disabled')]) - for repo in repos: - repo_source = os.path.join(repo._root(), 'sources', repo._get_repo_name_part(), '*') - for source_dir in glob.glob(repo_source): - if source_dir not in cannot_be_deleted_path: - to_delete.add(source_dir) - else: - to_keep.add(source_dir) - - # we are comparing cannot_be_deleted_path with to keep to sensure that the algorithm is working, we want to avoid to erase file by mistake - # note: 
it is possible that a parent_build is in testing without checkouting sources, but it should be exceptions - if to_delete: - if cannot_be_deleted_path != to_keep: - _logger.warning('Inconsistency between sources and database: \n%s \n%s' % (cannot_be_deleted_path-to_keep, to_keep-cannot_be_deleted_path)) - to_delete = list(to_delete) - to_keep = list(to_keep) - cannot_be_deleted_path = list(cannot_be_deleted_path) - for source_dir in to_delete: - _logger.info('Deleting source: %s' % source_dir) - assert 'static' in source_dir - shutil.rmtree(source_dir) - _logger.info('%s/%s source folder where deleted (%s kept)' % (len(to_delete), len(to_delete+to_keep), len(to_keep))) - except: - _logger.error('An exception occured while cleaning sources') - pass - - def _docker_cleanup(self): - _logger.info('Docker cleaning') - docker_ps_result = docker_ps() - containers = {int(dc.split('-', 1)[0]):dc for dc in docker_ps_result if dest_reg.match(dc)} - if containers: - candidates = self.env['runbot.build'].search([('id', 'in', list(containers.keys())), ('local_state', '=', 'done')]) - for c in candidates: - _logger.info('container %s found running with build state done', containers[c.id]) - docker_stop(containers[c.id], c._path()) - ignored = {dc for dc in docker_ps_result if not dest_reg.match(dc)} - if ignored: - _logger.debug('docker (%s) not deleted because not dest format', " ".join(list(ignored))) - class RefTime(models.Model): - _name = "runbot.repo.reftime" + _name = 'runbot.repo.reftime' _description = "Repo reftime" _log_access = False @@ -792,7 +554,7 @@ class RefTime(models.Model): class HookTime(models.Model): - _name = "runbot.repo.hooktime" + _name = 'runbot.repo.hooktime' _description = "Repo hooktime" _log_access = False diff --git a/runbot/models/res_config_settings.py b/runbot/models/res_config_settings.py index 2be869ff..35c0342c 100644 --- a/runbot/models/res_config_settings.py +++ b/runbot/models/res_config_settings.py @@ -1,29 +1,46 @@ # -*- coding: utf-8 -*- +import re from .. 
import common from odoo import api, fields, models +from odoo.exceptions import UserError class ResConfigSettings(models.TransientModel): _inherit = 'res.config.settings' - runbot_workers = fields.Integer('Total number of workers') + runbot_workers = fields.Integer('Default number of workers') runbot_running_max = fields.Integer('Maximum number of running builds') runbot_timeout = fields.Integer('Max allowed step timeout (in seconds)') runbot_starting_port = fields.Integer('Starting port for running builds') runbot_domain = fields.Char('Runbot domain') - runbot_max_age = fields.Integer('Max branch age (in days)') + runbot_max_age = fields.Integer('Max commit age (in days)') runbot_logdb_uri = fields.Char('Runbot URI for build logs') runbot_update_frequency = fields.Integer('Update frequency (in seconds)') runbot_template = fields.Char('Postgresql template', help="Postgresql template to use when creating DB's") runbot_message = fields.Text('Frontend warning message') + runbot_do_fetch = fields.Boolean('Discover new commits') + runbot_do_schedule = fields.Boolean('Schedule builds') + runbot_is_base_regex = fields.Char('Regex is_base') + + runbot_db_gc_days = fields.Integer('Days before gc', default=30, config_parameter='runbot.db_gc_days') + runbot_db_gc_days_child = fields.Integer('Days before gc of child', default=15, config_parameter='runbot.db_gc_days_child') + + runbot_pending_warning = fields.Integer('Pending warning limit', default=5, config_parameter='runbot.pending.warning') + runbot_pending_critical = fields.Integer('Pending critical limit', default=5, config_parameter='runbot.pending.critical') + + # TODO other icp + # runbot.runbot_maxlogs 100 + # runbot.runbot_nginx True + # migration db + # ln path @api.model def get_values(self): res = super(ResConfigSettings, self).get_values() get_param = self.env['ir.config_parameter'].sudo().get_param - res.update(runbot_workers=int(get_param('runbot.runbot_workers', default=6)), - runbot_running_max=int(get_param('runbot.runbot_running_max', default=75)), + res.update(runbot_workers=int(get_param('runbot.runbot_workers', default=2)), + runbot_running_max=int(get_param('runbot.runbot_running_max', default=5)), runbot_timeout=int(get_param('runbot.runbot_timeout', default=10000)), runbot_starting_port=int(get_param('runbot.runbot_starting_port', default=2000)), runbot_domain=get_param('runbot.runbot_domain', default=common.fqdn()), @@ -32,6 +49,9 @@ class ResConfigSettings(models.TransientModel): runbot_update_frequency=int(get_param('runbot.runbot_update_frequency', default=10)), runbot_template=get_param('runbot.runbot_db_template'), runbot_message=get_param('runbot.runbot_message', default=''), + runbot_do_fetch=get_param('runbot.runbot_do_fetch', default=False), + runbot_do_schedule=get_param('runbot.runbot_do_schedule', default=False), + runbot_is_base_regex=get_param('runbot.runbot_is_base_regex', default='') ) return res @@ -48,3 +68,16 @@ class ResConfigSettings(models.TransientModel): set_param('runbot.runbot_update_frequency', self.runbot_update_frequency) set_param('runbot.runbot_db_template', self.runbot_template) set_param('runbot.runbot_message', self.runbot_message) + set_param('runbot.runbot_do_fetch', self.runbot_do_fetch) + set_param('runbot.runbot_do_schedule', self.runbot_do_schedule) + set_param('runbot.runbot_is_base_regex', self.runbot_is_base_regex) + + @api.onchange('runbot_is_base_regex') + def _on_change_is_base_regex(self): + """ verify that the base_regex is valid + """ + if self.runbot_is_base_regex: + try: + 
re.compile(self.runbot_is_base_regex) + except re.error: + raise UserError("The regex is invalid") diff --git a/runbot/models/runbot.py b/runbot/models/runbot.py new file mode 100644 index 00000000..c90bd7bf --- /dev/null +++ b/runbot/models/runbot.py @@ -0,0 +1,350 @@ +import time +import logging +import glob +import random +import re +import signal +import subprocess +import shutil + +from ..common import fqdn, dest_reg, os +from ..container import docker_ps, docker_stop + +from odoo import models, fields +from odoo.osv import expression +from odoo.tools import config +from odoo.modules.module import get_module_resource + +_logger = logging.getLogger(__name__) + + +# after this point, not really a repo business +class Runbot(models.AbstractModel): + _name = 'runbot.runbot' + _description = 'Base runbot model' + + def _commit(self): + self.env.cr.commit() + self.env.cache.invalidate() + self.env.clear() + + def _root(self): + """Return root directory of repository""" + default = os.path.join(os.path.dirname(__file__), '../static') + return os.path.abspath(default) + + def _scheduler(self, host): + + self._gc_testing(host) + self._commit() + for build in self._get_builds_with_requested_actions(host): + build._process_requested_actions() + self._commit() + for build in self._get_builds_to_schedule(host): + build._schedule() + self._commit() + self._assign_pending_builds(host, host.nb_worker, [('build_type', '!=', 'scheduled')]) + self._commit() + self._assign_pending_builds(host, host.nb_worker-1 or host.nb_worker) + self._commit() + for build in self._get_builds_to_init(host): + build._init_pendings(host) + self._commit() + self._gc_running(host) + self._commit() + self._reload_nginx() + + def build_domain_host(self, host, domain=None): + domain = domain or [] + return [('host', '=', host.name)] + domain + + def _get_builds_with_requested_actions(self, host): + return self.env['runbot.build'].search(self.build_domain_host(host, [('requested_action', 'in', ['wake_up', 'deathrow'])])) + + def _get_builds_to_schedule(self, host): + return self.env['runbot.build'].search(self.build_domain_host(host, [('local_state', 'in', ['testing', 'running'])])) + + def _assign_pending_builds(self, host, nb_worker, domain=None): + if host.assigned_only or nb_worker <= 0: + return + domain_host = self.build_domain_host(host) + reserved_slots = self.env['runbot.build'].search_count(domain_host + [('local_state', 'in', ('testing', 'pending'))]) + assignable_slots = (nb_worker - reserved_slots) + if assignable_slots > 0: + allocated = self._allocate_builds(host, assignable_slots, domain) + if allocated: + + _logger.info('Builds %s were allocated to runbot', allocated) + + def _get_builds_to_init(self, host): + domain_host = self.build_domain_host(host) + used_slots = self.env['runbot.build'].search_count(domain_host + [('local_state', '=', 'testing')]) + available_slots = host.nb_worker - used_slots + if available_slots <= 0: + return self.env['runbot.build'] + return self.env['runbot.build'].search(domain_host + [('local_state', '=', 'pending')], limit=available_slots) + + def _gc_running(self, host): + running_max = host.get_running_max() + domain_host = self.build_domain_host(host) + Build = self.env['runbot.build'] + cannot_be_killed_ids = Build.search(domain_host + [('keep_running', '!=', True)]).ids + sticky_bundles = self.env['runbot.bundle'].search([('sticky', '=', True)]) + cannot_be_killed_ids = [ + build.id + for build in sticky_bundles.mapped('last_batchs.slot_ids.build_id') + if build.host ==
host.name + ][:running_max] + build_ids = Build.search(domain_host + [('local_state', '=', 'running'), ('id', 'not in', cannot_be_killed_ids)], order='job_start desc').ids + Build.browse(build_ids)[running_max:]._kill() + + def _gc_testing(self, host): + """garbage collect builds that could be killed""" + # decide if we need room + Build = self.env['runbot.build'] + domain_host = self.build_domain_host(host) + testing_builds = Build.search(domain_host + [('local_state', 'in', ['testing', 'pending']), ('requested_action', '!=', 'deathrow')]) + used_slots = len(testing_builds) + available_slots = host.nb_worker - used_slots + nb_pending = Build.search_count([('local_state', '=', 'pending'), ('host', '=', False)]) + if available_slots > 0 or nb_pending == 0: + return + + for build in testing_builds: + top_parent = build._get_top_parent() + if build.killable: + top_parent._ask_kill(message='Build automatically killed, new build found.') + + def _allocate_builds(self, host, nb_slots, domain=None): + if nb_slots <= 0: + return [] + non_allocated_domain = [('local_state', '=', 'pending'), ('host', '=', False)] + if domain: + non_allocated_domain = expression.AND([non_allocated_domain, domain]) + e = expression.expression(non_allocated_domain, self.env['runbot.build']) + assert e.get_tables() == ['"runbot_build"'] + where_clause, where_params = e.to_sql() + + # self-assign to be sure that another runbot batch cannot self assign the same builds + query = """UPDATE + runbot_build + SET + host = %%s + WHERE + runbot_build.id IN ( + SELECT runbot_build.id + FROM runbot_build + WHERE + %s + ORDER BY + array_position(array['normal','rebuild','indirect','scheduled']::varchar[], runbot_build.build_type) ASC + FOR UPDATE OF runbot_build SKIP LOCKED + LIMIT %%s + ) + RETURNING id""" % where_clause + self.env.cr.execute(query, [host.name] + where_params + [nb_slots]) + return self.env.cr.fetchall() + + def _domain(self): + return self.env.get('ir.config_parameter').get_param('runbot.runbot_domain', fqdn()) + + def _reload_nginx(self): + env = self.env + settings = {} + settings['port'] = config.get('http_port') + settings['runbot_static'] = os.path.join(get_module_resource('runbot', 'static'), '') + nginx_dir = os.path.join(self._root(), 'nginx') + settings['nginx_dir'] = nginx_dir + settings['re_escape'] = re.escape + settings['fqdn'] = fqdn() + + icp = env['ir.config_parameter'].sudo() + nginx = icp.get_param('runbot.runbot_nginx', True) # or just force nginx? 
+ + if nginx: + settings['builds'] = env['runbot.build'].search([('local_state', '=', 'running'), ('host', '=', fqdn())]) + + nginx_config = env['ir.ui.view'].render_template("runbot.nginx_config", settings) + os.makedirs(nginx_dir, exist_ok=True) + content = None + nginx_conf_path = os.path.join(nginx_dir, 'nginx.conf') + content = '' + if os.path.isfile(nginx_conf_path): + with open(nginx_conf_path, 'rb') as f: + content = f.read() + if content != nginx_config: + _logger.debug('reload nginx') + with open(nginx_conf_path, 'wb') as f: + f.write(nginx_config) + try: + pid = int(open(os.path.join(nginx_dir, 'nginx.pid')).read().strip(' \n')) + os.kill(pid, signal.SIGHUP) + except Exception: + _logger.debug('start nginx') + if subprocess.call(['/usr/sbin/nginx', '-p', nginx_dir, '-c', 'nginx.conf']): + # obscure nginx bug leaving orphan worker listening on nginx port + if not subprocess.call(['pkill', '-f', '-P1', 'nginx: worker']): + _logger.debug('failed to start nginx - orphan worker killed, retrying') + subprocess.call(['/usr/sbin/nginx', '-p', nginx_dir, '-c', 'nginx.conf']) + else: + _logger.debug('failed to start nginx - failed to kill orphan worker - oh well') + + def _get_cron_period(self): + """ Compute the cron period, set to half of the + real cron timeout from config. + """ + cron_limit = config.get('limit_time_real_cron') + req_limit = config.get('limit_time_real') + cron_timeout = cron_limit if cron_limit > -1 else req_limit + return cron_timeout / 2 + + def _cron(self): + """ + This method is the default cron for new commit discovery and build scheduling. + The cron runs for a long time to avoid spamming the logs. + """ + start_time = time.time() + timeout = self._get_cron_period() + get_param = self.env['ir.config_parameter'].get_param + update_frequency = int(get_param('runbot.runbot_update_frequency', default=10)) + runbot_do_fetch = get_param('runbot.runbot_do_fetch') + runbot_do_schedule = get_param('runbot.runbot_do_schedule') + host = self.env['runbot.host']._get_current() + host.set_psql_conn_count() + host.last_start_loop = fields.Datetime.now() + self._commit() + # Bootstrap + host._bootstrap() + if runbot_do_schedule: + host._docker_build() + self._source_cleanup() + self.env['runbot.build']._local_cleanup() + self._docker_cleanup() + _logger.info('Starting loop') + while time.time() - start_time < timeout: + repos = self.env['runbot.repo'].search([('mode', '!=', 'disabled')]) + + processing_batch = self.env['runbot.batch'].search([('state', 'in', ('preparing', 'ready'))], order='id asc') + preparing_batch = processing_batch.filtered(lambda b: b.state == 'preparing') + self._commit() + if runbot_do_fetch: + for repo in repos: + repo._update_batches(bool(preparing_batch)) + self._commit() + if processing_batch: + _logger.info('starting processing of %s batches', len(processing_batch)) + for batch in processing_batch: + batch._process() + self._commit() + _logger.info('end processing') + self._commit() + if runbot_do_schedule: + sleep_time = self._scheduler_loop_turn(host, update_frequency) + self.sleep(sleep_time) + else: + self.sleep(update_frequency) + self._commit() + + host.last_end_loop = fields.Datetime.now() + + def sleep(self, t): + time.sleep(t) + + def _scheduler_loop_turn(self, host, default_sleep=1): + try: + self._scheduler(host) + host.last_success = fields.Datetime.now() + self._commit() + except Exception as e: + self.env.cr.rollback() + self.env.clear() + _logger.exception(e) + message = str(e) + if host.last_exception == message: +
host.exception_count += 1 + else: + host.last_exception = str(e) + host.exception_count = 1 + self._commit() + return random.uniform(0, 3) + else: + if host.last_exception: + host.last_exception = "" + host.exception_count = 0 + return default_sleep + + def _source_cleanup(self): + try: + if self.pool._init: + return + _logger.info('Source cleaning') + # we can remove a source only if no build is using it, either as its name or as a reference commit + cannot_be_deleted_builds = self.env['runbot.build'].search([('host', '=', fqdn()), ('local_state', '!=', 'done')]) + cannot_be_deleted_builds |= cannot_be_deleted_builds.mapped('params_id.builds_reference_ids') + cannot_be_deleted_path = set() + for build in cannot_be_deleted_builds: + for build_commit in build.params_id.commit_link_ids: + cannot_be_deleted_path.add(build_commit.commit_id._source_path()) + + to_delete = set() + to_keep = set() + repos = self.env['runbot.repo'].search([('mode', '!=', 'disabled')]) + for repo in repos: + repo_source = os.path.join(self._root(), 'sources', repo.name, '*') + for source_dir in glob.glob(repo_source): + if source_dir not in cannot_be_deleted_path: + to_delete.add(source_dir) + else: + to_keep.add(source_dir) + + # we compare cannot_be_deleted_path with to_keep to ensure that the algorithm is working; we want to avoid erasing files by mistake + # note: it is possible that a parent_build is in testing without having checked out the sources, but that should be exceptional + if to_delete: + if cannot_be_deleted_path != to_keep: + _logger.warning('Inconsistency between sources and database: \n%s \n%s' % (cannot_be_deleted_path-to_keep, to_keep-cannot_be_deleted_path)) + to_delete = list(to_delete) + to_keep = list(to_keep) + cannot_be_deleted_path = list(cannot_be_deleted_path) + for source_dir in to_delete: + _logger.info('Deleting source: %s' % source_dir) + assert 'static' in source_dir + shutil.rmtree(source_dir) + _logger.info('%s/%s source folders were deleted (%s kept)' % (len(to_delete), len(to_delete+to_keep), len(to_keep))) + except: + _logger.exception('An exception occurred while cleaning sources') + pass + + def _docker_cleanup(self): + _logger.info('Docker cleaning') + docker_ps_result = docker_ps() + + containers = {} + ignored = [] + for dc in docker_ps_result: + build = self.env['runbot.build']._build_from_dest(dc) + if build: + containers[build.id] = dc + if containers: + candidates = self.env['runbot.build'].search([('id', 'in', list(containers.keys())), ('local_state', '=', 'done')]) + for c in candidates: + _logger.info('container %s found running with build state done', containers[c.id]) + docker_stop(containers[c.id], c._path()) + ignored = {dc for dc in docker_ps_result if not dest_reg.match(dc)} + if ignored: + _logger.debug('docker (%s) not deleted because not dest format', list(ignored)) + + def warning(self, message, *args): + if args: + message = message % args + return self.env['runbot.warning'].create({'message': message}) + + +class RunbotWarning(models.Model): + """ + Generic Warnings for runbot + """ + + _name = 'runbot.warning' + _description = 'Generic Runbot Warning' + + message = fields.Char("Warning", index=True) diff --git a/runbot/models/upgrade.py b/runbot/models/upgrade.py new file mode 100644 index 00000000..705771e5 --- /dev/null +++ b/runbot/models/upgrade.py @@ -0,0 +1,63 @@ +import re +from odoo import models, fields +from odoo.exceptions import UserError + + +class UpgradeExceptions(models.Model): + _name = 'runbot.upgrade.exception' + _description = 'Upgrade
exception' + + active = fields.Boolean('Active', default=True) + elements = fields.Text('Elements') + bundle_id = fields.Many2one('runbot.bundle', index=True) + info = fields.Text('Info') + + def _generate(self): + exceptions = self.search([]) + if exceptions: + return 'suppress_upgrade_warnings=%s' % (','.join(exceptions.mapped('elements'))).replace(' ', '').replace('\n', ',') + return False + + +class UpgradeRegex(models.Model): + _name = 'runbot.upgrade.regex' + _description = 'Upgrade regex' + + active = fields.Boolean('Active', default=True) + prefix = fields.Char('Type') + regex = fields.Char('Regex') + + +class BuildResult(models.Model): + _inherit = 'runbot.build' + + def _parse_upgrade_errors(self): + ir_logs = self.env['ir.logging'].search([('level', 'in', ('ERROR', 'WARNING', 'CRITICAL')), ('type', '=', 'server'), ('build_id', 'in', self.ids)]) + + upgrade_regexes = self.env['runbot.upgrade.regex'].search([]) + exception = [] + for log in ir_logs: + for upgrade_regex in upgrade_regexes: + m = re.search(upgrade_regex.regex, log.message) + if m: + exception.append('%s:%s' % (upgrade_regex.prefix, m.groups()[0])) + + if exception: + bundle = False + batches = self._get_top_parent().slot_ids.mapped('batch_id') + if batches: + bundle = batches[0].bundle_id.id + res = { + 'name': 'Upgrade Exception', + 'type': 'ir.actions.act_window', + 'res_model': 'runbot.upgrade.exception', + 'view_mode': 'form', + 'context': { + 'default_elements': '\n'.join(exception), + 'default_bundle_id': bundle, + 'default_info': 'Automatically generated from build %s' % self.id + } + } + return res + else: + raise UserError('Nothing found here') diff --git a/runbot/models/user.py b/runbot/models/user.py new file mode 100644 index 00000000..4af0b9be --- /dev/null +++ b/runbot/models/user.py @@ -0,0 +1,10 @@ + +from odoo import models, fields + + +class User(models.Model): + _inherit = 'res.users' + + # Add default action_id + action_id = fields.Many2one('ir.actions.actions', + default=lambda self: self.env.ref('runbot.runbot_menu_warning_root', raise_if_not_found=False)) diff --git a/runbot/models/version.py b/runbot/models/version.py new file mode 100644 index 00000000..4a889cf6 --- /dev/null +++ b/runbot/models/version.py @@ -0,0 +1,102 @@ +import logging +import re +from odoo import models, fields, api, tools + + +_logger = logging.getLogger(__name__) + + +class Version(models.Model): + _name = 'runbot.version' + _description = "Version" + _order = 'sequence desc, number desc,id' + + name = fields.Char('Version name') + number = fields.Char('Version number', compute='_compute_version_number', store=True, help="Useful to sort by version") + sequence = fields.Integer('sequence') + is_major = fields.Char('Is major version', compute='_compute_version_number', store=True) + + base_bundle_id = fields.Many2one('runbot.bundle', compute='_compute_base_bundle_id') + + previous_major_version_id = fields.Many2one('runbot.version', compute='_compute_version_relations') + intermediate_version_ids = fields.Many2many('runbot.version', compute='_compute_version_relations') + next_major_version_id = fields.Many2one('runbot.version', compute='_compute_version_relations') + next_intermediate_version_ids = fields.Many2many('runbot.version', compute='_compute_version_relations') + + @api.depends('name') + def _compute_version_number(self): + for version in self: + if version.name == 'master': + version.number = '~' + version.is_major = False + else: + # max version number with this format: 99.99 + version.number =
'.'.join([elem.zfill(2) for elem in re.sub(r'[^0-9\.]', '', version.name).split('.')]) + version.is_major = all(elem == '00' for elem in version.number.split('.')[1:]) + + def create(self, values): + model = self.browse() + model._get_id.clear_cache(model) + return super().create(values) + + def _get(self, name): + return self.browse(self._get_id(name)) + + @tools.ormcache('name') + def _get_id(self, name): + version = self.search([('name', '=', name)]) + if not version: + version = self.create({ + 'name': name, + }) + return version.id + + @api.depends('is_major', 'number') + def _compute_version_relations(self): + all_versions = self.search([], order='sequence, number') + for version in self: + version.previous_major_version_id = next( + ( + v + for v in reversed(all_versions) + if v.is_major and v.number < version.number and v.sequence <= version.sequence # TODO FIXME, make version comparable? + ), self.browse()) + if version.previous_major_version_id: + version.intermediate_version_ids = all_versions.filtered( + lambda v, current=version: v.number > current.previous_major_version_id.number and v.number < current.number and v.sequence <= current.sequence and v.sequence >= current.previous_major_version_id.sequence + ) + else: + version.intermediate_version_ids = all_versions.filtered( + lambda v, current=version: v.number < current.number and v.sequence <= current.sequence + ) + version.next_major_version_id = next( + ( + v + for v in all_versions + if (v.is_major or v.name == 'master') and v.number > version.number and v.sequence >= version.sequence + ), self.browse()) + if version.next_major_version_id: + version.next_intermediate_version_ids = all_versions.filtered( + lambda v, current=version: v.number < current.next_major_version_id.number and v.number > current.number and v.sequence <= current.next_major_version_id.sequence and v.sequence >= current.sequence + ) + else: + version.next_intermediate_version_ids = all_versions.filtered( + lambda v, current=version: v.number > current.number and v.sequence >= current.sequence + ) + + # @api.depends('base_bundle_id.is_base', 'base_bundle_id.version_id', 'base_bundle_id.project_id') + @api.depends_context('project_id') + def _compute_base_bundle_id(self): + project_id = self.env.context.get('project_id') + if not project_id: + _logger.warning("_compute_base_bundle_id: no project_id in context") + project_id = self.env.ref('runbot.main_project').id + + bundles = self.env['runbot.bundle'].search([ + ('version_id', 'in', self.ids), + ('is_base', '=', True), + ('project_id', '=', project_id) + ]) + bundle_by_version = {bundle.version_id.id: bundle for bundle in bundles} + for version in self: + version.base_bundle_id = bundle_by_version.get(version.id) diff --git a/runbot/security/ir.model.access.csv b/runbot/security/ir.model.access.csv index 83dbbd15..8e40c8aa 100644 --- a/runbot/security/ir.model.access.csv +++ b/runbot/security/ir.model.access.csv @@ -1,12 +1,10 @@ id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink -access_runbot_repo,runbot_repo,runbot.model_runbot_repo,group_user,1,0,0,0 +access_runbot_remote,runbot_remote,runbot.model_runbot_remote,group_user,1,0,0,0 access_runbot_branch,runbot_branch,runbot.model_runbot_branch,group_user,1,0,0,0 access_runbot_build,runbot_build,runbot.model_runbot_build,group_user,1,0,0,0 -access_runbot_build_dependency,runbot_build_dependency,runbot.model_runbot_build_dependency,group_user,1,0,0,0 
-access_runbot_repo_admin,runbot_repo_admin,runbot.model_runbot_repo,runbot.group_runbot_admin,1,1,1,1 +access_runbot_remote_admin,runbot_remote_admin,runbot.model_runbot_remote,runbot.group_runbot_admin,1,1,1,1 access_runbot_branch_admin,runbot_branch_admin,runbot.model_runbot_branch,runbot.group_runbot_admin,1,1,1,1 access_runbot_build_admin,runbot_build_admin,runbot.model_runbot_build,runbot.group_runbot_admin,1,1,1,1 -access_runbot_build_dependency_admin,runbot_build_dependency_admin,runbot.model_runbot_build_dependency,runbot.group_runbot_admin,1,1,1,1 access_irlogging,log by runbot users,base.model_ir_logging,group_user,0,0,1,0 access_runbot_build_config_step_user,runbot_build_config_step_user,runbot.model_runbot_build_config_step,group_user,1,0,0,0 @@ -18,6 +16,9 @@ access_runbot_build_config_manager,runbot_build_config_manager,runbot.model_runb access_runbot_build_config_step_order_user,runbot_build_config_step_order_user,runbot.model_runbot_build_config_step_order,group_user,1,0,0,0 access_runbot_build_config_step_order_manager,runbot_build_config_step_order_manager,runbot.model_runbot_build_config_step_order,runbot.group_build_config_user,1,1,1,1 +access_runbot_config_step_upgrade_db_user,runbot_config_step_upgrade_db_user,runbot.model_runbot_config_step_upgrade_db,group_user,1,0,0,0 +access_runbot_config_step_upgrade_db_manager,runbot_config_step_upgrade_db_manager,runbot.model_runbot_config_step_upgrade_db,runbot.group_build_config_user,1,1,1,1 + access_runbot_build_error_user,runbot_build_error_user,runbot.model_runbot_build_error,group_user,1,0,0,0 access_runbot_build_error_manager,runbot_build_error_manager,runbot.model_runbot_build_error,runbot.group_runbot_admin,1,1,1,1 access_runbot_build_error_tag_user,runbot_build_error_tag_user,runbot.model_runbot_build_error_tag,group_user,1,0,0,0 @@ -33,7 +34,7 @@ access_runbot_error_log_user,runbot_error_log_user,runbot.model_runbot_error_log access_runbot_error_log_manager,runbot_error_log_manager,runbot.model_runbot_error_log,runbot.group_runbot_admin,1,1,1,1 access_runbot_repo_hooktime,runbot_repo_hooktime,runbot.model_runbot_repo_hooktime,group_user,1,0,0,0 -access_runbot_repo_reftime,runbot_repo_reftime,runbot.model_runbot_repo_reftime,group_user,1,0,0,0 +access_runbot_repo_referencetime,runbot_repo_referencetime,runbot.model_runbot_repo_reftime,group_user,1,0,0,0 access_runbot_build_stat_user,runbot_build_stat_user,runbot.model_runbot_build_stat,group_user,1,0,0,0 access_runbot_build_stat_admin,runbot_build_stat_admin,runbot.model_runbot_build_stat,runbot.group_runbot_admin,1,1,1,1 @@ -41,5 +42,62 @@ access_runbot_build_stat_admin,runbot_build_stat_admin,runbot.model_runbot_build access_runbot_build_stat_sql_user,runbot_build_stat_sql_user,runbot.model_runbot_build_stat_sql,group_user,1,0,0,0 access_runbot_build_stat_sql_admin,runbot_build_stat_sql_admin,runbot.model_runbot_build_stat_sql,runbot.group_runbot_admin,1,0,0,0 -access_runbot_build_stat_regex_user,access_runbot_build_stat_regex_user,model_runbot_build_stat_regex,runbot.group_user,1,0,0,0 -access_runbot_build_stat_regex_admin,access_runbot_build_stat_regex_admin,model_runbot_build_stat_regex,runbot.group_runbot_admin,1,1,1,1 +access_runbot_build_stat_regex_user,access_runbot_build_stat_regex_user,runbot.model_runbot_build_stat_regex,runbot.group_user,1,0,0,0 +access_runbot_build_stat_regex_admin,access_runbot_build_stat_regex_admin,runbot.model_runbot_build_stat_regex,runbot.group_runbot_admin,1,1,1,1 + + 
+access_runbot_trigger_user,access_runbot_trigger_user,runbot.model_runbot_trigger,runbot.group_user,1,0,0,0 +access_runbot_trigger_runbot_admin,access_runbot_trigger_runbot_admin,runbot.model_runbot_trigger,runbot.group_runbot_admin,1,1,1,1 + +access_runbot_repo_user,access_runbot_repo_user,runbot.model_runbot_repo,runbot.group_user,1,0,0,0 +access_runbot_repo_runbot_admin,access_runbot_repo_runbot_admin,runbot.model_runbot_repo,runbot.group_runbot_admin,1,1,1,1 + +access_runbot_commit_user,access_runbot_commit_user,runbot.model_runbot_commit,runbot.group_user,1,0,0,0 + +access_runbot_build_params_user,access_runbot_build_params_user,runbot.model_runbot_build_params,runbot.group_user,1,0,0,0 +access_runbot_build_params_runbot_admin,access_runbot_build_params_runbot_admin,runbot.model_runbot_build_params,runbot.group_runbot_admin,1,1,1,1 + +access_runbot_commit_link_user,access_runbot_commit_link_user,runbot.model_runbot_commit_link,runbot.group_user,1,0,0,0 +access_runbot_commit_link_runbot_admin,access_runbot_commit_link_runbot_admin,runbot.model_runbot_commit_link,runbot.group_runbot_admin,1,1,1,1 + +access_runbot_version_user,access_runbot_version_user,runbot.model_runbot_version,runbot.group_user,1,0,0,0 +access_runbot_version_runbot_admin,access_runbot_version_runbot_admin,runbot.model_runbot_version,runbot.group_runbot_admin,1,1,1,1 + +access_runbot_project_user,access_runbot_project_user,runbot.model_runbot_project,runbot.group_user,1,0,0,0 +access_runbot_project_runbot_admin,access_runbot_project_runbot_admin,runbot.model_runbot_project,runbot.group_runbot_admin,1,1,1,1 + +access_runbot_bundle_user,access_runbot_bundle_user,runbot.model_runbot_bundle,runbot.group_user,1,0,0,0 +access_runbot_bundle_runbot_admin,access_runbot_bundle_runbot_admin,runbot.model_runbot_bundle,runbot.group_runbot_admin,1,1,1,1 + +access_runbot_batch_user,access_runbot_batch_user,runbot.model_runbot_batch,runbot.group_user,1,0,0,0 +access_runbot_batch_runbot_admin,access_runbot_batch_runbot_admin,runbot.model_runbot_batch,runbot.group_runbot_admin,1,1,1,1 + +access_runbot_batch_slot_user,access_runbot_batch_slot_user,runbot.model_runbot_batch_slot,runbot.group_user,1,0,0,0 +access_runbot_batch_slot_runbot_admin,access_runbot_batch_slot_runbot_admin,runbot.model_runbot_batch_slot,runbot.group_runbot_admin,1,1,1,1 + +access_runbot_ref_log_runbot_user,access_runbot_ref_log_runbot_user,runbot.model_runbot_ref_log,runbot.group_user,1,0,0,0 +access_runbot_ref_log_runbot_admin,access_runbot_ref_log_runbot_admin,runbot.model_runbot_ref_log,runbot.group_runbot_admin,1,1,1,1 + +access_runbot_commit_status_runbot_user,access_runbot_commit_status_runbot_user,runbot.model_runbot_commit_status,runbot.group_user,1,0,0,0 +access_runbot_commit_status_runbot_admin,access_runbot_commit_status_runbot_admin,runbot.model_runbot_commit_status,runbot.group_runbot_admin,1,1,1,1 + +access_runbot_bundle_trigger_custom_runbot_user,access_runbot_bundle_trigger_custom_runbot_user,runbot.model_runbot_bundle_trigger_custom,runbot.group_user,1,0,0,0 +access_runbot_bundle_trigger_custom_runbot_admin,access_runbot_bundle_trigger_custom_runbot_admin,runbot.model_runbot_bundle_trigger_custom,runbot.group_runbot_admin,1,1,1,1 + +access_runbot_category_runbot_user,access_runbot_category_runbot_user,runbot.model_runbot_category,runbot.group_user,1,0,0,0 +access_runbot_category_runbot_admin,access_runbot_category_runbot_admin,runbot.model_runbot_category,runbot.group_runbot_admin,1,1,1,1 + 
+access_runbot_batch_log_runbot_user,access_runbot_batch_log_runbot_user,runbot.model_runbot_batch_log,runbot.group_user,1,0,0,0 + +access_runbot_warning_user,access_runbot_warning_user,runbot.model_runbot_warning,runbot.group_user,1,0,0,0 +access_runbot_warning_admin,access_runbot_warning_admin,runbot.model_runbot_warning,runbot.group_runbot_admin,1,1,1,1 + +access_runbot_database_user,access_runbot_database_user,runbot.model_runbot_database,runbot.group_user,1,0,0,0 +access_runbot_database_admin,access_runbot_database_admin,runbot.model_runbot_database,runbot.group_runbot_admin,1,1,1,1 + +access_runbot_upgrade_regex_user,access_runbot_upgrade_regex_user,runbot.model_runbot_upgrade_regex,runbot.group_user,1,0,0,0 +access_runbot_upgrade_regex_admin,access_runbot_upgrade_regex_admin,runbot.model_runbot_upgrade_regex,runbot.group_runbot_admin,1,1,1,1 + +access_runbot_upgrade_exception_user,access_runbot_upgrade_exception_user,runbot.model_runbot_upgrade_exception,runbot.group_user,1,0,0,0 +access_runbot_upgrade_exception_admin,access_runbot_upgrade_exception_admin,runbot.model_runbot_upgrade_exception,runbot.group_runbot_admin,1,1,1,1 + diff --git a/runbot/security/ir.rule.csv b/runbot/security/ir.rule.csv index 386145f5..0e6bd929 100644 --- a/runbot/security/ir.rule.csv +++ b/runbot/security/ir.rule.csv @@ -1,7 +1,14 @@ id,name,model_id/id,groups/id,domain_force,perm_read,perm_create,perm_write,perm_unlink -rule_repo,"limited to groups",model_runbot_repo,group_user,"['|', ('group_ids', '=', False), ('group_ids', 'in', [g.id for g in user.groups_id])]",1,1,1,1 + + +rule_project,"limited to groups",model_runbot_project,group_user,"['|', ('group_ids', '=', False), ('group_ids', 'in', [g.id for g in user.groups_id])]",1,1,1,1 +rule_project_mgmt,"manager can see all",model_runbot_project,group_runbot_admin,"[(1, '=', 1)]",1,1,1,1 + +rule_repo,"limited to groups",model_runbot_repo,group_user,"['|', ('project_id.group_ids', '=', False), ('project_id.group_ids', 'in', [g.id for g in user.groups_id])]",1,1,1,1 rule_repo_mgmt,"manager can see all",model_runbot_repo,group_runbot_admin,"[(1, '=', 1)]",1,1,1,1 -rule_branch,"limited to groups",model_runbot_branch,group_user,"['|', ('repo_id.group_ids', '=', False), ('repo_id.group_ids', 'in', [g.id for g in user.groups_id])]",1,1,1,1 +rule_branch,"limited to groups",model_runbot_branch,group_user,"['|', ('remote_id.repo_id.project_id.group_ids', '=', False), ('remote_id.repo_id.project_id.group_ids', 'in', [g.id for g in user.groups_id])]",1,1,1,1 rule_branch_mgmt,"manager can see all",model_runbot_branch,group_runbot_admin,"[(1, '=', 1)]",1,1,1,1 -rule_build,"limited to groups",model_runbot_build,group_user,"['|', ('repo_id.group_ids', '=', False), ('repo_id.group_ids', 'in', [g.id for g in user.groups_id])]",1,1,1,1 +rule_commit,"limited to groups",model_runbot_commit,group_user,"['|', ('repo_id.project_id.group_ids', '=', False), ('repo_id.project_id.group_ids', 'in', [g.id for g in user.groups_id])]",1,1,1,1 +rule_commit_mgmt,"manager can see all",model_runbot_commit,group_runbot_admin,"[(1, '=', 1)]",1,1,1,1 +rule_build,"limited to groups",model_runbot_build,group_user,"['|', ('params_id.project_id.group_ids', '=', False), ('params_id.project_id.group_ids', 'in', [g.id for g in user.groups_id])]",1,1,1,1 rule_build_mgmt,"manager can see all",model_runbot_build,group_runbot_admin,"[(1, '=', 1)]",1,1,1,1 diff --git a/runbot/security/runbot_security.xml b/runbot/security/runbot_security.xml index 9a77fdec..1f0c6cfa 100644 --- 
a/runbot/security/runbot_security.xml +++ b/runbot/security/runbot_security.xml @@ -1,13 +1,13 @@ - + Runbot User - + @@ -17,39 +17,43 @@ + + + + - - Manager - - - - - - + Build Config Build config user - + Build config manager - + Build config administrator - + + + Runbot administrator + + + + + All config can be edited by config admin diff --git a/runbot/static/src/css/runbot.css b/runbot/static/src/css/runbot.css deleted file mode 100644 index 3340f171..00000000 --- a/runbot/static/src/css/runbot.css +++ /dev/null @@ -1,87 +0,0 @@ -.separator { - border-top: 2px solid #666; -} - -[data-toggle="collapse"] .fa:before { - content: "\f139"; -} - -[data-toggle="collapse"].collapsed .fa:before { - content: "\f13a"; -} - -body, .table{ - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - color:#444; -} - -.btn-default { - background-color: #fff; - color: #444; - border-color: #ccc; -} - -.btn-default:hover { - background-color: #ccc; - color: #444; - border-color: #ccc; -} - -.btn-sm, .btn-group-sm > .btn { - padding: 0.25rem 0.5rem; - font-size: 0.89rem; - line-height: 1.5; - border-radius: 0.2rem; -} -.btn-ssm, .btn-group-ssm > .btn { - padding: 0.22rem 0.4rem; - font-size: 0.82rem; - line-height: 1; - border-radius: 0.2rem; -} - -.killed, .bg-killed, .bg-killed-light { - background-color: #aaa; -} - -.dropdown-toggle:after { content: none } - -.branch_name { - max-width: 250px; - white-space: nowrap; - overflow: hidden; - text-overflow: ellipsis; -} - -.branch_time { - float:right; - margin-left:10px; -} - -.bg-success-light { - background-color: #dff0d8; -} -.bg-danger-light { - background-color: #f2dede; -} -.bg-info-light { - background-color: #d9edf7; -} - -.text-info{ - color: #096b72 !important; -} -.build_subject_buttons { - display: flex; -} -.build_buttons { - margin-left: auto -} - -.bg-killed { - background-color: #aaa; -} - -.label-killed { - background-color: #aaa; -} diff --git a/runbot/static/src/css/runbot.scss b/runbot/static/src/css/runbot.scss new file mode 100644 index 00000000..92ad5ba6 --- /dev/null +++ b/runbot/static/src/css/runbot.scss @@ -0,0 +1,202 @@ +.separator { + border-top: 2px solid #666; +} + +[data-toggle="collapse"] .fa:before { + content: "\f139"; +} + +[data-toggle="collapse"].collapsed .fa:before { + content: "\f13a"; +} + +body, .table{ + font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; + color:#444; +} + +.btn-default { + background-color: #fff; + color: #444; + border-color: #ccc; +} + +.btn-default:hover { + background-color: #ccc; + color: #444; + border-color: #ccc; +} + +.btn-sm, .btn-group-sm > .btn { + padding: 0.25rem 0.5rem; + font-size: 0.89rem; + line-height: 1.5; + border-radius: 0.2rem; +} +.btn-ssm, .btn-group-ssm > .btn { + padding: 0.22rem 0.4rem; + font-size: 0.82rem; + line-height: 1; + border-radius: 0.2rem; +} + +.killed, .bg-killed, .bg-killed-light { + background-color: #aaa; +} + +.dropdown-toggle:after { content: none } + +.one_line { + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} +.batch_tile { + padding: 6px; +} +.branch_time { + float:right; + margin-left:10px; +} + + +:root { + --info-light: #d9edf7; +} + + +.bg-success-light { + background-color: #dff0d8; +} +.bg-danger-light { + background-color: #f2dede; +} +.bg-info-light { + background-color: var(--info-light); +} + +.text-info{ + color: #096b72 !important; +} +.build_subject_buttons { + display: flex; +} +.build_buttons { + margin-left: auto +} + +.bg-killed { + background-color: #aaa; +} + +.badge-killed { + 
background-color: #aaa; +} + +.table-condensed td { + padding: 0.25rem; +} + +.line-through { + text-decoration: line-through; +} + +.badge-light{ + border: 1px solid #AAA; +} +.arrow{ + display: none; +} + +.badge-light:hover .arrow{ + display: inline; +} + +.slot_button_group { + display: flex; + padding: 0 1px; +} + +.slot_button_group .btn { + flex: 0 0 25px; +} + +.slot_button_group .btn.slot_name { + width: 40px; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + flex: 1 1 auto; + text-align: left; +} + +.batch_header { + padding: 6px; + padding-bottom: 0px; +} + +.batch_slots { + display: flex; + flex-wrap: wrap; + padding: 6px; +} + +.batch_commits { + background-color: white; +} + +.batch_commits { + padding: 2px; +} + +.match_type_new { + background-color: var(--info-light); +} +.batch_row { + .slot_container{ + flex: 1 0 200px; + padding: 0 4px; + } + .slot_filler { + width: 100px; + height: 0px; + flex: 1 0 200px; + padding: 0 4px; + } +} +.bundle_row { + border-bottom: 1px solid var(--gray); + .batch_commits { + font-size: 80%; + } + .slot_container{ + flex:1 0 50%; + } + .slot_filler { + flex:1 0 50%; + } + .more { + .batch_commits { + display: block; + } + } + .nomore { + .batch_commits { + display: none; + padding:8px; + } + } + .nomore.batch_tile:hover { + .batch_commits { + display: block; + position: absolute; + bottom: 1px; + transform: translateY(100%); + z-index: 100; + border: 1px solid rgba(0, 0, 0, 0.125); + border-radius: 0.2rem; + box-sizing: border-box; + margin-left:-1px; + } + } +} diff --git a/runbot/static/src/js/runbot.js b/runbot/static/src/js/runbot.js index 9d3bea88..527fdb99 100644 --- a/runbot/static/src/js/runbot.js +++ b/runbot/static/src/js/runbot.js @@ -2,8 +2,7 @@ "use strict"; var OPMAP = { - 'rebuild': {operation: 'force', then: 'redirect'}, - 'rebuild-exact': {operation: 'force/1', then: 'redirect'}, + 'rebuild': {operation: 'rebuild', then: 'redirect'}, 'kill': {operation: 'kill', then: 'reload'}, 'wakeup': {operation: 'wakeup', then: 'reload'} }; diff --git a/runbot/templates/assets.xml b/runbot/templates/assets.xml index cb1be972..92a2d16c 100644 --- a/runbot/templates/assets.xml +++ b/runbot/templates/assets.xml @@ -1,10 +1,10 @@ - -