from __future__ import annotations

import select
import shutil
import threading
from typing import Optional

"""
|
|
|
|
Configuration:
|
|
|
|
|
|
|
|
* an ``odoo`` binary in the path, which runs the relevant odoo; to ensure a
|
|
|
|
clean slate odoo is re-started and a new database is created before each
|
|
|
|
test (technically a "template" db is created first, then that DB is cloned
|
|
|
|
and the fresh clone is used for each test)
|
|
|
|
|
|
|
|
* pytest.ini (at the root of the runbot repo or higher) with the following
|
|
|
|
sections and keys
|
|
|
|
|
|
|
|
``github``
|
|
|
|
- owner, the name of the account (personal or org) under which test repos
|
|
|
|
will be created & deleted (note: some repos might be created under role
|
|
|
|
accounts as well)
|
|
|
|
- token, either personal or oauth, must have the scopes ``public_repo``,
|
|
|
|
``delete_repo`` and ``admin:repo_hook``, if personal the owner must be
|
|
|
|
the corresponding user account, not an org. Also user:email for the
|
|
|
|
forwardport / forwardbot tests
|
|
|
|
|
|
|
|
``role_reviewer``, ``role_self_reviewer`` and ``role_other``
|
|
|
|
- name (optional, used as partner name when creating that, otherwise github
|
|
|
|
login gets used)
|
2021-10-06 18:06:53 +07:00
|
|
|
- email (optional, used as partner email when creating that, otherwise
|
|
|
|
github email gets used, reviewer and self-reviewer must have an email)
|
2019-10-10 14:22:12 +07:00
|
|
|
- token, a personal access token with the ``public_repo`` scope (otherwise
|
|
|
|
the API can't leave comments), maybe eventually delete_repo (for personal
|
|
|
|
forks)
|
|
|
|
|
|
|
|
.. warning:: the accounts must *not* be flagged, or the webhooks on
|
|
|
|
commenting or creating reviews will not trigger, and the
|
|
|
|
tests will fail
|
|
|
|
|
|
|
|
* either ``ngrok`` or ``lt`` (localtunnel) available on the path. ngrok with
|
|
|
|
a configured account is recommended: ngrok is more reliable than localtunnel
|
|
|
|
but a free account is necessary to get a high-enough rate limiting for some
|
|
|
|
of the multi-repo tests to work
|
|
|
|
|
|
|
|
Finally the tests aren't 100% reliable as they rely on quite a bit of network
|
|
|
|
traffic, it's possible that the tests fail due to network issues rather than
|
|
|
|
logic errors.
|
|
|
|
"""
import base64
import collections
import configparser
import contextlib
import copy
import fcntl
import functools
import http.client
import itertools
import os
import pathlib
import pprint
import random
import re
import socket
import subprocess
import sys
import tempfile
import time
import uuid
import warnings
import xmlrpc.client
from contextlib import closing

import pytest
import requests

NGROK_CLI = [
|
|
|
|
'ngrok', 'start', '--none', '--region', 'eu',
|
|
|
|
]
|
|
|
|
|
2019-08-23 21:16:30 +07:00
|
|
|
def pytest_addoption(parser):
|
2019-09-23 18:54:42 +07:00
|
|
|
parser.addoption('--addons-path')
|
|
|
|
parser.addoption("--no-delete", action="store_true", help="Don't delete repo after a failed run")
|
2020-01-27 18:44:41 +07:00
|
|
|
parser.addoption('--log-github', action='store_true')
|
2022-10-27 16:25:25 +07:00
|
|
|
parser.addoption('--coverage', action='store_true')
|
2019-09-23 18:54:42 +07:00
|
|
|
|
2019-08-23 21:16:30 +07:00
|
|
|
parser.addoption(
|
2022-08-24 16:17:01 +07:00
|
|
|
'--tunnel', action="store", type="choice", choices=['', 'ngrok', 'localtunnel'], default='',
|
2019-08-23 21:16:30 +07:00
|
|
|
help="Which tunneling method to use to expose the local Odoo server "
|
|
|
|
"to hook up github's webhook. ngrok is more reliable, but "
|
|
|
|
"creating a free account is necessary to avoid rate-limiting "
|
|
|
|
"issues (anonymous limiting is rate-limited at 20 incoming "
|
|
|
|
"queries per minute, free is 40, multi-repo batching tests will "
|
|
|
|
"blow through the former); localtunnel has no rate-limiting but "
|
|
|
|
"the servers are way less reliable")
|
|
|
|
|
|
|
|
def is_manager(config):
|
|
|
|
return not hasattr(config, 'workerinput')
|
2020-11-17 21:21:21 +07:00
|
|
|
|
|
|
|
def pytest_configure(config):
|
|
|
|
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'mergebot_test_utils'))
|
2024-06-11 20:41:03 +07:00
|
|
|
config.addinivalue_line(
|
|
|
|
"markers",
|
|
|
|
"expect_log_errors(reason): allow and require tracebacks in the log",
|
|
|
|
)
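    # Hypothetical usage of the marker in a test module:
    #
    #     @pytest.mark.expect_log_errors(reason="the failure is the point")
    #     def test_staging_fails(env, repo, config):
    #         ...
    #
    # the `env` fixture then *requires* a traceback in the server log instead
    # of failing when one shows up.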
|
2022-08-24 16:17:01 +07:00
|
|
|
|
|
|
|
def pytest_unconfigure(config):
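    # only the manager process (or a non-distributed run) performs cleanup:
    # it drops every template database whose name was recorded in a shared
    # ``template-<module>`` file by DbDict below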
|
|
|
|
if not is_manager(config):
|
|
|
|
return
|
|
|
|
|
|
|
|
for c in config._tmp_path_factory.getbasetemp().iterdir():
|
|
|
|
if c.is_file() and c.name.startswith('template-'):
|
|
|
|
subprocess.run(['dropdb', '--if-exists', c.read_text(encoding='utf-8')])
|
2020-11-17 21:21:21 +07:00
|
|
|
|
2019-10-10 17:07:57 +07:00
|
|
|
@pytest.fixture(scope='session', autouse=True)
|
|
|
|
def _set_socket_timeout():
|
|
|
|
""" Avoid unlimited wait on standard sockets during tests, this is mostly
|
|
|
|
an issue for non-trivial cron calls
|
|
|
|
"""
|
2020-01-27 21:39:25 +07:00
|
|
|
socket.setdefaulttimeout(120.0)
|
2019-10-10 17:07:57 +07:00
|
|
|
|
2019-08-23 21:16:30 +07:00
|
|
|
@pytest.fixture(scope="session")
|
|
|
|
def config(pytestconfig):
|
|
|
|
""" Flat version of the pytest config file (pytest.ini), parses to a
|
|
|
|
simple dict of {section: {key: value}}
|
|
|
|
|
|
|
|
"""
|
|
|
|
conf = configparser.ConfigParser(interpolation=None)
|
|
|
|
conf.read([pytestconfig.inifile])
|
2019-10-10 14:22:12 +07:00
|
|
|
cnf = {
|
2019-08-23 21:16:30 +07:00
|
|
|
name: dict(s.items())
|
|
|
|
for name, s in conf.items()
|
|
|
|
}
|
2019-10-10 14:22:12 +07:00
|
|
|
# special case user / owner / ...
|
|
|
|
cnf['role_user'] = {
|
|
|
|
'token': conf['github']['token']
|
|
|
|
}
|
|
|
|
return cnf
|
2019-08-23 21:16:30 +07:00
|
|
|
|
|
|
|
@pytest.fixture(scope='session')
|
2020-07-10 17:52:14 +07:00
|
|
|
def rolemap(request, config):
|
|
|
|
# hack because capsys is not session-scoped
|
|
|
|
capmanager = request.config.pluginmanager.getplugin("capturemanager")
|
2019-08-23 21:16:30 +07:00
|
|
|
# only fetch github logins once per session
|
|
|
|
rolemap = {}
|
|
|
|
for k, data in config.items():
|
|
|
|
if k.startswith('role_'):
|
|
|
|
role = k[5:]
|
|
|
|
elif k == 'github':
|
|
|
|
role = 'user'
|
|
|
|
else:
|
|
|
|
continue
|
|
|
|
|
2020-07-10 17:52:14 +07:00
|
|
|
with capmanager.global_and_fixture_disabled():
|
|
|
|
r = _rate_limited(lambda: requests.get('https://api.github.com/user', headers={'Authorization': 'token %s' % data['token']}))
|
2019-08-23 21:16:30 +07:00
|
|
|
r.raise_for_status()
|
|
|
|
|
2021-08-02 14:18:30 +07:00
|
|
|
user = rolemap[role] = r.json()
|
|
|
|
data['user'] = user['login']
|
2019-08-23 21:16:30 +07:00
|
|
|
return rolemap
|
|
|
|
|
|
|
|
@pytest.fixture
|
2020-02-10 21:05:08 +07:00
|
|
|
def partners(env, config, rolemap):
|
|
|
|
"""This specifically does not create partners for ``user`` and ``other``
|
|
|
|
so they can be generated on-interaction, as "external" users.
|
|
|
|
|
|
|
|
The two differ in that ``user`` has ownership of the org and can manage
|
|
|
|
    repos there, while ``other`` is completely unrelated to anything, so it is
    useful to check interactions where the author only has read access to the reference
|
|
|
|
repositories.
|
|
|
|
"""
|
2021-10-06 18:06:53 +07:00
|
|
|
m = {}
|
2021-08-02 14:18:30 +07:00
|
|
|
for role, u in rolemap.items():
|
2019-08-23 21:16:30 +07:00
|
|
|
if role in ('user', 'other'):
|
|
|
|
continue
|
|
|
|
|
2021-08-02 14:18:30 +07:00
|
|
|
login = u['login']
|
2021-10-06 18:06:53 +07:00
|
|
|
conf = config['role_' + role]
|
2020-02-10 21:05:08 +07:00
|
|
|
m[role] = env['res.partner'].create({
|
2021-10-06 18:06:53 +07:00
|
|
|
'name': conf.get('name', login),
|
|
|
|
'email': conf.get('email') or u['email'] or False,
|
2019-08-23 21:16:30 +07:00
|
|
|
'github_login': login,
|
|
|
|
})
|
2020-02-10 21:05:08 +07:00
|
|
|
return m
|
2019-08-23 21:16:30 +07:00
|
|
|
|
2020-02-10 21:05:08 +07:00
|
|
|
@pytest.fixture
|
|
|
|
def setreviewers(partners):
|
|
|
|
def _(*repos):
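        # (0, 0, vals) is the Odoo x2many "create" command: each entry adds a
        # review_rights line linking the partner to the given repository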
|
|
|
|
partners['reviewer'].write({
|
|
|
|
'review_rights': [
|
|
|
|
(0, 0, {'repository_id': repo.id, 'review': True})
|
|
|
|
for repo in repos
|
|
|
|
]
|
|
|
|
})
|
|
|
|
partners['self_reviewer'].write({
|
|
|
|
'review_rights': [
|
|
|
|
(0, 0, {'repository_id': repo.id, 'self_review': True})
|
|
|
|
for repo in repos
|
|
|
|
]
|
|
|
|
})
|
|
|
|
return _
|
|
|
|
|
|
|
|
@pytest.fixture
|
|
|
|
def users(partners, rolemap):
|
2021-08-02 14:18:30 +07:00
|
|
|
return {k: v['login'] for k, v in rolemap.items()}
|
2019-08-23 21:16:30 +07:00
|
|
|
|
|
|
|
@pytest.fixture(scope='session')
|
|
|
|
def tunnel(pytestconfig, port):
|
|
|
|
""" Creates a tunnel to localhost:<port> using ngrok or localtunnel, should yield the
|
|
|
|
publicly routable address & terminate the process at the end of the session
|
|
|
|
"""
|
|
|
|
tunnel = pytestconfig.getoption('--tunnel')
|
2022-08-24 16:17:01 +07:00
|
|
|
if tunnel == '':
|
2022-10-12 15:46:56 +07:00
|
|
|
yield f'http://localhost:{port}'
|
2022-08-24 16:17:01 +07:00
|
|
|
elif tunnel == 'ngrok':
|
2024-01-16 20:54:24 +07:00
|
|
|
own = None
|
2021-09-22 12:38:12 +07:00
|
|
|
web_addr = 'http://localhost:4040/api'
|
2019-08-23 21:16:30 +07:00
|
|
|
addr = 'localhost:%d' % port
|
2021-09-22 12:38:12 +07:00
|
|
|
# try to find out if ngrok is running, and if it's not attempt
|
|
|
|
# to start it
|
2019-08-23 21:16:30 +07:00
|
|
|
try:
|
2021-09-22 12:38:12 +07:00
|
|
|
# FIXME: this is for xdist to avoid workers running ngrok at the
|
|
|
|
# exact same time, use lockfile instead
|
|
|
|
time.sleep(random.SystemRandom().randint(1, 10))
|
|
|
|
requests.get(web_addr)
|
2019-08-23 21:16:30 +07:00
|
|
|
except requests.exceptions.ConnectionError:
|
2024-01-16 20:54:24 +07:00
|
|
|
own = subprocess.Popen(NGROK_CLI, stdout=subprocess.DEVNULL)
|
2021-09-22 12:38:12 +07:00
|
|
|
for _ in range(5):
|
|
|
|
time.sleep(1)
|
|
|
|
with contextlib.suppress(requests.exceptions.ConnectionError):
|
|
|
|
requests.get(web_addr)
|
|
|
|
break
|
|
|
|
else:
|
|
|
|
raise Exception("Unable to connect to ngrok")
|
|
|
|
|
|
|
|
requests.post(f'{web_addr}/tunnels', json={
|
2019-08-23 21:16:30 +07:00
|
|
|
'name': str(port),
|
|
|
|
'proto': 'http',
|
|
|
|
'addr': addr,
|
2024-01-16 20:54:24 +07:00
|
|
|
'schemes': ['https'],
|
2019-10-10 14:22:12 +07:00
|
|
|
'inspect': True,
|
|
|
|
}).raise_for_status()
|
2019-08-23 21:16:30 +07:00
|
|
|
|
2021-09-22 12:38:12 +07:00
|
|
|
tunnel = f'{web_addr}/tunnels/{port}'
|
2019-10-10 14:22:12 +07:00
|
|
|
for _ in range(10):
|
|
|
|
time.sleep(2)
|
|
|
|
r = requests.get(tunnel)
|
|
|
|
# not created yet, wait and retry
|
|
|
|
if r.status_code == 404:
|
|
|
|
continue
|
|
|
|
# check for weird responses
|
2019-08-23 21:16:30 +07:00
|
|
|
r.raise_for_status()
|
2019-10-10 14:22:12 +07:00
|
|
|
try:
|
|
|
|
yield r.json()['public_url']
|
|
|
|
finally:
|
2021-09-22 12:38:12 +07:00
|
|
|
requests.delete(tunnel)
|
2019-10-10 14:22:12 +07:00
|
|
|
for _ in range(10):
|
|
|
|
time.sleep(1)
|
|
|
|
r = requests.get(tunnel)
|
|
|
|
# check if deletion is done
|
|
|
|
if r.status_code == 404:
|
|
|
|
break
|
|
|
|
r.raise_for_status()
|
|
|
|
else:
|
|
|
|
raise TimeoutError("ngrok tunnel deletion failed")
|
2019-08-23 21:16:30 +07:00
|
|
|
|
2021-09-22 12:38:12 +07:00
|
|
|
r = requests.get(f'{web_addr}/tunnels')
|
2024-01-16 20:54:24 +07:00
|
|
|
assert r.ok, f'{r.reason} {r.text}'
|
2019-10-10 14:22:12 +07:00
|
|
|
# there are still tunnels in the list -> bail
|
2024-01-16 20:54:24 +07:00
|
|
|
if not own or r.json()['tunnels']:
|
2019-10-10 14:22:12 +07:00
|
|
|
return
|
2019-08-23 21:16:30 +07:00
|
|
|
|
2024-01-16 20:54:24 +07:00
|
|
|
# no more tunnels and we started ngrok -> try to kill it
|
|
|
|
own.terminate()
|
|
|
|
own.wait(30)
|
2019-10-10 14:22:12 +07:00
|
|
|
else:
|
|
|
|
raise TimeoutError("ngrok tunnel creation failed (?)")
|
2019-08-23 21:16:30 +07:00
|
|
|
elif tunnel == 'localtunnel':
|
|
|
|
p = subprocess.Popen(['lt', '-p', str(port)], stdout=subprocess.PIPE)
|
|
|
|
try:
|
|
|
|
r = p.stdout.readline()
|
|
|
|
m = re.match(br'your url is: (https://.*\.localtunnel\.me)', r)
|
|
|
|
assert m, "could not get the localtunnel URL"
|
|
|
|
yield m.group(1).decode('ascii')
|
|
|
|
finally:
|
|
|
|
p.terminate()
|
|
|
|
p.wait(30)
|
|
|
|
else:
|
|
|
|
raise ValueError("Unsupported %s tunnel method" % tunnel)
|
2019-09-23 18:54:42 +07:00
|
|
|
|
2020-01-21 20:56:57 +07:00
|
|
|
class DbDict(dict):
|
|
|
|
def __init__(self, adpath, shared_dir):
|
2020-01-21 20:56:57 +07:00
|
|
|
super().__init__()
|
|
|
|
self._adpath = adpath
|
|
|
|
self._shared_dir = shared_dir
|
2020-01-21 20:56:57 +07:00
|
|
|
def __missing__(self, module):
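        # Template creation is coordinated through a lockfile shared by all
        # xdist workers: the first worker needing a template for `module`
        # takes an exclusive lock, creates the template database and writes
        # its name into the file; every other worker blocks on the lock then
        # just reads the name back out and duplicates the template.
        # `os.open` is needed because no mode of `open` means "open at offset
        # 0 for reading, or create for writing": `r+` refuses to create the
        # file, `w+` truncates, and `a+` may not allow seeking back to the
        # start on all systems.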
|
|
|
|
with contextlib.ExitStack() as atexit:
|
|
|
|
f = atexit.enter_context(os.fdopen(os.open(
|
|
|
|
self._shared_dir / f'template-{module}',
|
|
|
|
os.O_CREAT | os.O_RDWR
|
|
|
|
), mode="r+", encoding='utf-8'))
|
|
|
|
fcntl.lockf(f, fcntl.LOCK_EX)
|
|
|
|
atexit.callback(fcntl.lockf, f, fcntl.LOCK_UN)
|
|
|
|
|
|
|
|
db = f.read()
|
|
|
|
if db:
|
|
|
|
self[module] = db
|
|
|
|
return db
|
|
|
|
|
2024-07-17 18:04:40 +07:00
|
|
|
d = (self._shared_dir / f'shared-{module}')
|
|
|
|
d.mkdir()
|
|
|
|
self[module] = db = 'template_%s' % uuid.uuid4()
|
2022-07-19 20:56:24 +07:00
|
|
|
subprocess.run([
|
|
|
|
'odoo', '--no-http',
|
2023-06-22 19:37:49 +07:00
|
|
|
*(['--addons-path', self._adpath] if self._adpath else []),
|
2023-08-31 13:52:08 +07:00
|
|
|
'-d', db, '-i', module + ',saas_worker,auth_oauth',
|
2022-07-19 20:56:24 +07:00
|
|
|
'--max-cron-threads', '0',
|
|
|
|
'--stop-after-init',
|
|
|
|
'--log-level', 'warn'
|
|
|
|
],
|
|
|
|
check=True,
|
2024-07-17 18:04:40 +07:00
|
|
|
env={**os.environ, 'XDG_DATA_HOME': str(d)}
|
2022-07-19 20:56:24 +07:00
|
|
|
)
|
|
|
|
f.write(db)
|
|
|
|
f.flush()
|
|
|
|
os.fsync(f.fileno())
|
|
|
|
|
2020-01-21 20:56:57 +07:00
|
|
|
return db
|
|
|
|
|
2019-09-23 18:54:42 +07:00
|
|
|
@pytest.fixture(scope='session')
|
2023-08-31 13:52:08 +07:00
|
|
|
def dbcache(request, tmp_path_factory, addons_path):
|
2019-09-23 18:54:42 +07:00
|
|
|
""" Creates template DB once per run, then just duplicates it before
|
|
|
|
starting odoo and running the testcase
|
|
|
|
"""
|
|
|
|
shared_dir = tmp_path_factory.getbasetemp()
|
|
|
|
if not is_manager(request.config):
|
|
|
|
# xdist workers get a subdir as their basetemp, so we need to go one
|
|
|
|
# level up to deref it
|
|
|
|
shared_dir = shared_dir.parent
|
|
|
|
|
2023-08-31 13:52:08 +07:00
|
|
|
dbs = DbDict(addons_path, shared_dir)
|
2020-01-21 20:56:57 +07:00
|
|
|
yield dbs
|
2019-09-23 18:54:42 +07:00
|
|
|
|
|
|
|
@pytest.fixture
|
2024-07-17 18:04:40 +07:00
|
|
|
def db(request, module, dbcache, tmpdir):
|
|
|
|
template_db = dbcache[module]
|
2019-09-23 18:54:42 +07:00
|
|
|
rundb = str(uuid.uuid4())
|
2024-07-17 18:04:40 +07:00
|
|
|
subprocess.run(['createdb', '-T', template_db, rundb], check=True)
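    # give the test run its own copy of the template's filestore, renamed to
    # match the duplicated database so attachments keep resolving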
|
|
|
|
share = tmpdir.mkdir('share')
|
|
|
|
shutil.copytree(
|
|
|
|
str(dbcache._shared_dir / f'shared-{module}'),
|
|
|
|
str(share),
|
|
|
|
dirs_exist_ok=True,
|
|
|
|
)
|
|
|
|
(share / 'Odoo' / 'filestore' / template_db).rename(
|
|
|
|
share / 'Odoo' / 'filestore' / rundb)
|
2019-09-23 18:54:42 +07:00
|
|
|
|
|
|
|
yield rundb
|
|
|
|
|
|
|
|
if not request.config.getoption('--no-delete'):
|
|
|
|
subprocess.run(['dropdb', rundb], check=True)
|
2019-10-10 14:22:12 +07:00
|
|
|
|
|
|
|
def wait_for_hook(n=1):
|
|
|
|
time.sleep(10 * n)
|
|
|
|
|
|
|
|
def wait_for_server(db, port, proc, mod, timeout=120):
|
|
|
|
""" Polls for server to be response & have installed our module.
|
|
|
|
|
|
|
|
Raises socket.timeout on failure
|
|
|
|
"""
|
|
|
|
limit = time.time() + timeout
|
|
|
|
while True:
|
|
|
|
if proc.poll() is not None:
|
|
|
|
raise Exception("Server unexpectedly closed")
|
|
|
|
|
|
|
|
try:
|
|
|
|
uid = xmlrpc.client.ServerProxy(
|
|
|
|
'http://localhost:{}/xmlrpc/2/common'.format(port))\
|
|
|
|
.authenticate(db, 'admin', 'admin', {})
|
|
|
|
mods = xmlrpc.client.ServerProxy(
|
|
|
|
'http://localhost:{}/xmlrpc/2/object'.format(port))\
|
|
|
|
.execute_kw(
|
|
|
|
db, uid, 'admin', 'ir.module.module', 'search_read', [
|
|
|
|
[('name', '=', mod)], ['state']
|
|
|
|
])
|
|
|
|
if mods and mods[0].get('state') == 'installed':
|
|
|
|
break
|
|
|
|
except ConnectionRefusedError:
|
|
|
|
if time.time() > limit:
|
|
|
|
raise socket.timeout()
|
|
|
|
|
|
|
|
@pytest.fixture(scope='session')
|
|
|
|
def port():
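    # bind to port 0 so the OS picks a free ephemeral port, then release it;
    # there is a small race window before odoo re-binds it, which is accepted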
|
|
|
|
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
|
|
|
|
s.bind(('', 0))
|
|
|
|
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
|
|
|
return s.getsockname()[1]
|
|
|
|
|
2024-06-26 19:34:34 +07:00
|
|
|
@pytest.fixture
|
|
|
|
def page(port):
|
|
|
|
with requests.Session() as s:
|
|
|
|
def get(url):
|
|
|
|
r = s.get('http://localhost:{}{}'.format(port, url))
|
|
|
|
r.raise_for_status()
|
|
|
|
return r.content
|
|
|
|
yield get
|
|
|
|
|
2022-06-03 17:08:49 +07:00
|
|
|
@pytest.fixture(scope='session')
|
|
|
|
def dummy_addons_path():
|
|
|
|
with tempfile.TemporaryDirectory() as dummy_addons_path:
|
|
|
|
mod = pathlib.Path(dummy_addons_path, 'saas_worker')
|
|
|
|
mod.mkdir(0o700)
|
2023-08-31 13:52:08 +07:00
|
|
|
(mod / '__init__.py').write_text('''\
|
|
|
|
from odoo import api, fields, models
|
|
|
|
|
|
|
|
|
|
|
|
class Base(models.AbstractModel):
|
|
|
|
_inherit = 'base'
|
|
|
|
|
|
|
|
def run_crons(self):
|
|
|
|
self.env['ir.cron']._process_jobs(self.env.cr.dbname)
|
|
|
|
return True
|
|
|
|
''', encoding='utf-8')
|
2022-06-03 17:08:49 +07:00
|
|
|
(mod / '__manifest__.py').write_text(pprint.pformat({
|
|
|
|
'name': 'dummy saas_worker',
|
|
|
|
'version': '1.0',
|
|
|
|
}), encoding='utf-8')
|
|
|
|
(mod / 'util.py').write_text("""\
|
2023-06-07 16:14:47 +07:00
|
|
|
def from_role(*_, **__):
|
2022-06-03 17:08:49 +07:00
|
|
|
return lambda fn: fn
|
|
|
|
""", encoding='utf-8')
|
|
|
|
|
|
|
|
yield dummy_addons_path
|
|
|
|
|
2023-08-31 13:52:08 +07:00
|
|
|
@pytest.fixture(scope='session')
|
|
|
|
def addons_path(request, dummy_addons_path):
|
|
|
|
return ','.join(map(str, filter(None, [
|
|
|
|
request.config.getoption('--addons-path'),
|
|
|
|
dummy_addons_path,
|
|
|
|
])))
|
|
|
|
|
2019-10-10 14:22:12 +07:00
|
|
|
@pytest.fixture
|
2023-08-31 13:52:08 +07:00
|
|
|
def server(request, db, port, module, addons_path, tmpdir):
|
2021-10-06 18:01:57 +07:00
|
|
|
log_handlers = [
|
|
|
|
'odoo.modules.loading:WARNING',
|
|
|
|
]
|
|
|
|
if not request.config.getoption('--log-github'):
|
|
|
|
log_handlers.append('github_requests:WARNING')
|
2020-01-27 18:44:41 +07:00
|
|
|
|
2022-10-27 16:25:25 +07:00
|
|
|
cov = []
|
|
|
|
if request.config.getoption('--coverage'):
|
|
|
|
cov = ['coverage', 'run', '-p', '--source=odoo.addons.runbot_merge,odoo.addons.forwardport', '--branch']
|
|
|
|
|
2024-06-11 20:41:03 +07:00
|
|
|
r, w = os.pipe2(os.O_NONBLOCK)
|
|
|
|
buf = bytearray()
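    # the server's stderr is funnelled through `_move`, which mirrors it to
    # our own stdout while accumulating it in `buf` so the `env` fixture can
    # check for (un)expected tracebacks once the test is done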
|
|
|
|
def _move(inpt=r, output=sys.stdout.fileno()):
|
|
|
|
while p.poll() is None:
|
|
|
|
readable, _, _ = select.select([inpt], [], [], 1)
|
|
|
|
if readable:
|
|
|
|
r = os.read(inpt, 4096)
|
|
|
|
if not r:
|
|
|
|
break
|
|
|
|
os.write(output, r)
|
|
|
|
buf.extend(r)
|
|
|
|
os.close(inpt)
|
|
|
|
|
2019-10-10 14:22:12 +07:00
|
|
|
p = subprocess.Popen([
|
2022-10-27 16:25:25 +07:00
|
|
|
*cov,
|
2019-10-10 14:22:12 +07:00
|
|
|
'odoo', '--http-port', str(port),
|
2022-06-03 17:08:49 +07:00
|
|
|
'--addons-path', addons_path,
|
2021-10-06 18:01:57 +07:00
|
|
|
'-d', db,
|
2019-10-10 14:22:12 +07:00
|
|
|
'--max-cron-threads', '0', # disable cron threads (we're running crons by hand)
|
2021-10-06 18:01:57 +07:00
|
|
|
*itertools.chain.from_iterable(('--log-handler', h) for h in log_handlers),
|
2024-06-11 20:41:03 +07:00
|
|
|
], stderr=w, env={
|
2022-07-19 20:56:24 +07:00
|
|
|
**os.environ,
|
|
|
|
# stop putting garbage in the user dirs, and potentially creating conflicts
|
|
|
|
# TODO: way to override this with macOS?
|
2024-07-17 18:04:40 +07:00
|
|
|
'XDG_DATA_HOME': str(tmpdir / 'share'),
|
2022-07-19 20:56:24 +07:00
|
|
|
'XDG_CACHE_HOME': str(tmpdir.mkdir('cache')),
|
|
|
|
})
|
2024-06-11 20:41:03 +07:00
|
|
|
os.close(w)
|
|
|
|
# start the reader thread here so `_move` can read `p` without needing
|
|
|
|
# additional handholding
|
|
|
|
threading.Thread(target=_move, daemon=True).start()
|
2019-10-10 14:22:12 +07:00
|
|
|
|
|
|
|
try:
|
|
|
|
wait_for_server(db, port, p, module)
|
|
|
|
|
2024-06-11 20:41:03 +07:00
|
|
|
yield p, buf
|
2019-10-10 14:22:12 +07:00
|
|
|
finally:
|
|
|
|
p.terminate()
|
|
|
|
p.wait(timeout=30)
|
|
|
|
|
|
|
|
@pytest.fixture
|
2024-06-11 20:41:03 +07:00
|
|
|
def env(request, port, server, db, default_crons):
|
2019-10-10 14:22:12 +07:00
|
|
|
yield Environment(port, db, default_crons)
|
2024-06-11 20:41:03 +07:00
|
|
|
if request.node.get_closest_marker('expect_log_errors'):
|
|
|
|
if b"Traceback (most recent call last):" not in server[1]:
|
|
|
|
pytest.fail("should have found error in logs.")
|
|
|
|
else:
|
|
|
|
if b"Traceback (most recent call last):" in server[1]:
|
|
|
|
pytest.fail("unexpected error in logs, fix, or mark function as `expect_log_errors` to require.")
|
2019-10-10 14:22:12 +07:00
|
|
|
|
2024-06-25 03:16:43 +07:00
|
|
|
@pytest.fixture
|
|
|
|
def reviewer_admin(env, partners):
|
|
|
|
env['res.users'].create({
|
|
|
|
'partner_id': partners['reviewer'].id,
|
|
|
|
'login': 'reviewer',
|
|
|
|
'groups_id': [
|
|
|
|
(4, env.ref("base.group_user").id, 0),
|
|
|
|
(4, env.ref("runbot_merge.group_admin").id, 0),
|
|
|
|
],
|
|
|
|
})
|
|
|
|
|
2021-10-13 12:58:21 +07:00
|
|
|
def check(response):
|
|
|
|
assert response.ok, response.text or response.reason
|
|
|
|
return response
|
2019-10-10 14:22:12 +07:00
|
|
|
# `users` is just so I can avoid autouse on the toplevel `users` fixture
# because it (seems to) break the existing local tests
|
|
|
|
@pytest.fixture
|
2020-07-10 17:52:14 +07:00
|
|
|
def make_repo(capsys, request, config, tunnel, users):
|
|
|
|
"""Fixtures which creates a repository on the github side, plugs webhooks
|
|
|
|
in, and registers the repository for deletion on cleanup (unless
|
|
|
|
``--no-delete`` is set)
|
|
|
|
"""
|
2019-10-10 14:22:12 +07:00
|
|
|
owner = config['github']['owner']
|
|
|
|
github = requests.Session()
|
|
|
|
github.headers['Authorization'] = 'token %s' % config['github']['token']
|
|
|
|
|
|
|
|
# check whether "owner" is a user or an org, as repo-creation endpoint is
|
|
|
|
# different
|
2020-07-10 17:52:14 +07:00
|
|
|
with capsys.disabled():
|
|
|
|
q = _rate_limited(lambda: github.get('https://api.github.com/users/{}'.format(owner)))
|
2019-10-10 14:22:12 +07:00
|
|
|
q.raise_for_status()
|
|
|
|
if q.json().get('type') == 'Organization':
|
|
|
|
endpoint = 'https://api.github.com/orgs/{}/repos'.format(owner)
|
|
|
|
else:
|
|
|
|
endpoint = 'https://api.github.com/user/repos'
|
2021-10-13 12:58:21 +07:00
|
|
|
r = check(github.get('https://api.github.com/user'))
|
2019-10-10 14:22:12 +07:00
|
|
|
assert r.json()['login'] == owner
|
|
|
|
|
|
|
|
repos = []
|
|
|
|
def repomaker(name):
|
2020-01-24 19:30:55 +07:00
|
|
|
name = 'ignore_%s_%s' % (name, base64.b64encode(os.urandom(6), b'-_').decode())
|
2019-10-10 14:22:12 +07:00
|
|
|
fullname = '{}/{}'.format(owner, name)
|
|
|
|
repo_url = 'https://api.github.com/repos/{}'.format(fullname)
|
|
|
|
|
|
|
|
# create repo
|
2021-10-13 12:58:21 +07:00
|
|
|
r = check(github.post(endpoint, json={
|
2019-10-10 14:22:12 +07:00
|
|
|
'name': name,
|
|
|
|
'has_issues': False,
|
|
|
|
'has_projects': False,
|
|
|
|
'has_wiki': False,
|
|
|
|
'auto_init': False,
|
|
|
|
# at least one merge method must be enabled :(
|
|
|
|
'allow_squash_merge': False,
|
|
|
|
# 'allow_merge_commit': False,
|
|
|
|
'allow_rebase_merge': False,
|
2021-10-13 12:58:21 +07:00
|
|
|
}))
|
2021-11-16 19:57:32 +07:00
|
|
|
r = r.json()
|
|
|
|
# wait for repository visibility
|
|
|
|
while True:
|
|
|
|
time.sleep(1)
|
|
|
|
if github.head(r['url']).ok:
|
|
|
|
break
|
|
|
|
|
2021-04-06 15:52:51 +07:00
|
|
|
repo = Repo(github, fullname, repos)
|
2019-10-10 14:22:12 +07:00
|
|
|
|
|
|
|
# create webhook
|
2021-10-13 12:58:21 +07:00
|
|
|
check(github.post('{}/hooks'.format(repo_url), json={
|
2019-10-10 14:22:12 +07:00
|
|
|
'name': 'web',
|
|
|
|
'config': {
|
|
|
|
'url': '{}/runbot_merge/hooks'.format(tunnel),
|
|
|
|
'content_type': 'json',
|
|
|
|
'insecure_ssl': '1',
|
|
|
|
},
|
|
|
|
'events': ['pull_request', 'issue_comment', 'status', 'pull_request_review']
|
2021-10-13 12:58:21 +07:00
|
|
|
}))
|
2021-11-16 19:57:32 +07:00
|
|
|
time.sleep(1)
|
2019-10-10 14:22:12 +07:00
|
|
|
|
2021-10-13 12:58:21 +07:00
|
|
|
check(github.put('{}/contents/{}'.format(repo_url, 'a'), json={
|
2019-10-10 14:22:12 +07:00
|
|
|
'path': 'a',
|
|
|
|
'message': 'github returns a 409 (Git Repository is Empty) if trying to create a tree in a repo with no objects',
|
|
|
|
'content': base64.b64encode(b'whee').decode('ascii'),
|
|
|
|
'branch': 'garbage_%s' % uuid.uuid4()
|
2021-10-13 12:58:21 +07:00
|
|
|
}))
|
2021-11-16 19:57:32 +07:00
|
|
|
time.sleep(1)
|
2021-04-06 15:52:51 +07:00
|
|
|
return repo
|
2019-10-10 14:22:12 +07:00
|
|
|
|
|
|
|
yield repomaker
|
|
|
|
|
|
|
|
if not request.config.getoption('--no-delete'):
|
|
|
|
for repo in reversed(repos):
|
|
|
|
repo.delete()
|
|
|
|
|
2020-07-10 17:52:14 +07:00
|
|
|
|
|
|
|
def _rate_limited(req):
|
|
|
|
while True:
|
|
|
|
q = req()
|
|
|
|
if not q.ok and q.headers.get('X-RateLimit-Remaining') == '0':
|
|
|
|
reset = int(q.headers['X-RateLimit-Reset'])
|
2021-08-02 14:18:30 +07:00
|
|
|
delay = max(0, round(reset - time.time() + 1.0))
|
2020-07-10 17:52:14 +07:00
|
|
|
print("Hit rate limit, sleeping for", delay, "seconds")
|
|
|
|
time.sleep(delay)
|
|
|
|
continue
|
|
|
|
break
|
|
|
|
return q
|
|
|
|
|
|
|
|
|
2019-10-10 14:22:12 +07:00
|
|
|
Commit = collections.namedtuple('Commit', 'id tree message author committer parents')
|
|
|
|
class Repo:
|
|
|
|
def __init__(self, session, fullname, repos):
|
|
|
|
self._session = session
|
|
|
|
self.name = fullname
|
|
|
|
self._repos = repos
|
|
|
|
self.hook = False
|
|
|
|
repos.append(self)
|
|
|
|
|
|
|
|
def __repr__(self):
|
|
|
|
return f'<conftest.Repo {self.name}>'
|
|
|
|
|
2019-10-16 19:41:26 +07:00
|
|
|
@property
|
|
|
|
def owner(self):
|
|
|
|
return self.name.split('/')[0]
|
|
|
|
|
2019-10-10 14:22:12 +07:00
|
|
|
def unsubscribe(self, token=None):
|
|
|
|
self._get_session(token).put('https://api.github.com/repos/{}/subscription'.format(self.name), json={
|
|
|
|
'subscribed': False,
|
|
|
|
'ignored': True,
|
|
|
|
})
|
|
|
|
|
2020-02-10 15:48:03 +07:00
|
|
|
def add_collaborator(self, login, token):
|
|
|
|
# send invitation to user
|
2021-10-13 12:58:21 +07:00
|
|
|
r = check(self._session.put('https://api.github.com/repos/{}/collaborators/{}'.format(self.name, login)))
|
2020-02-10 15:48:03 +07:00
|
|
|
# accept invitation on behalf of user
|
2021-10-13 12:58:21 +07:00
|
|
|
check(requests.patch('https://api.github.com/user/repository_invitations/{}'.format(r.json()['id']), headers={
|
2020-02-10 15:48:03 +07:00
|
|
|
'Authorization': 'token ' + token
|
2021-10-13 12:58:21 +07:00
|
|
|
}))
|
2020-02-10 15:48:03 +07:00
|
|
|
# sanity check that user is part of collaborators
|
2021-10-13 12:58:21 +07:00
|
|
|
r = check(self._session.get('https://api.github.com/repos/{}/collaborators'.format(self.name)))
|
2020-02-10 15:48:03 +07:00
|
|
|
assert any(login == c['login'] for c in r.json())
|
|
|
|
|
2019-10-10 14:22:12 +07:00
|
|
|
def _get_session(self, token):
|
|
|
|
s = self._session
|
|
|
|
if token:
|
|
|
|
s = requests.Session()
|
|
|
|
s.headers['Authorization'] = 'token %s' % token
|
|
|
|
return s
|
|
|
|
|
|
|
|
def delete(self):
|
|
|
|
r = self._session.delete('https://api.github.com/repos/{}'.format(self.name))
|
|
|
|
if r.status_code != 204:
|
2021-04-06 15:52:51 +07:00
|
|
|
warnings.warn("Unable to delete repository %s (HTTP %s)" % (self.name, r.status_code))
|
2019-10-10 14:22:12 +07:00
|
|
|
|
|
|
|
def set_secret(self, secret):
|
|
|
|
assert self.hook
|
|
|
|
r = self._session.get(
|
|
|
|
'https://api.github.com/repos/{}/hooks'.format(self.name))
|
2024-04-08 18:23:56 +07:00
|
|
|
assert 200 <= r.status_code < 300, r.text
|
|
|
|
[hook] = r.json()
|
2019-10-10 14:22:12 +07:00
|
|
|
|
|
|
|
r = self._session.patch('https://api.github.com/repos/{}/hooks/{}'.format(self.name, hook['id']), json={
|
|
|
|
'config': {**hook['config'], 'secret': secret},
|
|
|
|
})
|
2024-04-08 18:23:56 +07:00
|
|
|
assert 200 <= r.status_code < 300, r.text
|
2019-10-10 14:22:12 +07:00
|
|
|
|
|
|
|
def get_ref(self, ref):
|
2019-10-16 19:41:26 +07:00
|
|
|
        # differs from .commit(ref).id for the sake of assertion error messages:
        # apparently commits/{ref} returns 422 or some other fool thing when the
        # ref does not exist, which sucks for asserting "the ref has been
        # deleted"
|
|
|
|
# FIXME: avoid calling get_ref on a hash & remove this code
|
|
|
|
if re.match(r'[0-9a-f]{40}', ref):
|
|
|
|
# just check that the commit exists
|
|
|
|
r = self._session.get('https://api.github.com/repos/{}/git/commits/{}'.format(self.name, ref))
|
2021-02-24 15:32:58 +07:00
|
|
|
assert 200 <= r.status_code < 300, r.reason or http.client.responses[r.status_code]
|
2019-10-16 19:41:26 +07:00
|
|
|
return r.json()['sha']
|
|
|
|
|
|
|
|
if ref.startswith('refs/'):
|
|
|
|
ref = ref[5:]
|
|
|
|
if not ref.startswith('heads'):
|
|
|
|
ref = 'heads/' + ref
|
|
|
|
|
|
|
|
r = self._session.get('https://api.github.com/repos/{}/git/ref/{}'.format(self.name, ref))
|
2021-02-24 15:32:58 +07:00
|
|
|
assert 200 <= r.status_code < 300, r.reason or http.client.responses[r.status_code]
|
2019-10-16 19:41:26 +07:00
|
|
|
res = r.json()
|
|
|
|
assert res['object']['type'] == 'commit'
|
|
|
|
return res['object']['sha']
|
2019-10-10 14:22:12 +07:00
|
|
|
|
2024-05-16 15:36:34 +07:00
|
|
|
def commit(self, ref: str) -> Commit:
|
2019-10-10 14:22:12 +07:00
|
|
|
if not re.match(r'[0-9a-f]{40}', ref):
|
|
|
|
if not ref.startswith(('heads/', 'refs/heads/')):
|
|
|
|
ref = 'refs/heads/' + ref
|
|
|
|
# apparently heads/<branch> ~ refs/heads/<branch> but are not
|
|
|
|
# necessarily up to date ??? unlike the git ref system where :ref
|
|
|
|
# starts at heads/
|
|
|
|
if ref.startswith('heads/'):
|
|
|
|
ref = 'refs/' + ref
|
|
|
|
|
|
|
|
r = self._session.get('https://api.github.com/repos/{}/commits/{}'.format(self.name, ref))
|
2024-04-08 18:23:56 +07:00
|
|
|
assert 200 <= r.status_code < 300, r.text
|
2019-10-10 14:22:12 +07:00
|
|
|
|
2024-05-16 15:36:34 +07:00
|
|
|
return self._commit_from_gh(r.json())
|
2019-10-10 14:22:12 +07:00
|
|
|
|
2024-05-16 15:36:34 +07:00
|
|
|
def _commit_from_gh(self, gh_commit: dict) -> Commit:
|
2019-10-10 14:22:12 +07:00
|
|
|
c = gh_commit['commit']
|
|
|
|
return Commit(
|
|
|
|
id=gh_commit['sha'],
|
|
|
|
tree=c['tree']['sha'],
|
|
|
|
message=c['message'],
|
|
|
|
author=c['author'],
|
|
|
|
committer=c['committer'],
|
|
|
|
parents=[p['sha'] for p in gh_commit['parents']],
|
|
|
|
)
|
|
|
|
|
|
|
|
    def read_tree(self, commit):
        """ read tree object from commit

        :param Commit commit:
        :rtype: Dict[str, str]
        """
        r = self._session.get('https://api.github.com/repos/{}/git/trees/{}'.format(self.name, commit.tree))
        assert 200 <= r.status_code < 300, r.text

        # read tree's blobs
        tree = {}
        for t in r.json()['tree']:
            assert t['type'] == 'blob', "we're *not* doing recursive trees in test cases"
            r = self._session.get('https://api.github.com/repos/{}/git/blobs/{}'.format(self.name, t['sha']))
            assert 200 <= r.status_code < 300, r.text
            tree[t['path']] = base64.b64decode(r.json()['content']).decode()

        return tree

    def make_ref(self, name, commit, force=False):
        assert self.hook
        assert name.startswith('heads/')
        r = self._session.post('https://api.github.com/repos/{}/git/refs'.format(self.name), json={
            'ref': 'refs/' + name,
            'sha': commit,
        })
        if force and r.status_code == 422:
            self.update_ref(name, commit, force=force)
            return
        assert r.ok, r.text

    def update_ref(self, name, commit, force=False):
        assert self.hook
        r = self._session.patch('https://api.github.com/repos/{}/git/refs/{}'.format(self.name, name), json={'sha': commit, 'force': force})
        assert r.ok, r.text

    def protect(self, branch):
        assert self.hook
        r = self._session.put('https://api.github.com/repos/{}/branches/{}/protection'.format(self.name, branch), json={
            'required_status_checks': None,
            'enforce_admins': True,
            'required_pull_request_reviews': None,
            'restrictions': None,
        })
        assert 200 <= r.status_code < 300, r.text
    # FIXME: remove this (runbot_merge should use make_commits directly)
    def make_commit(self, ref, message, author, committer=None, tree=None, wait=True):
        assert tree
        if isinstance(ref, list):
            assert all(re.match(r'[0-9a-f]{40}', r) for r in ref)
            ancestor_id = ref
            ref = None
        else:
            ancestor_id = self.get_ref(ref) if ref else None
            # if ref is already a commit id, don't pass it in
            if ancestor_id == ref:
                ref = None

        [h] = self.make_commits(
            ancestor_id,
            MakeCommit(message, tree=tree, author=author, committer=committer, reset=True),
            ref=ref
        )
        return h
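    # make_commits drives the low-level git data API: for each commit spec
    # (Repo.Commit, aliased MakeCommit below) it posts a tree, optionally
    # based on the previous one, wraps it in a commit whose parent is the
    # previous hash, and finally points `ref` at the last commit created.
    # An illustrative sketch (repository state and names are made up for
    # the example):
    #
    #     repo.make_commits(
    #         None,
    #         Repo.Commit("initial", tree={'a': '1'}),
    #         Repo.Commit("update a", tree={'a': '2'}),
    #         ref='heads/master',
    #     )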
    def make_commits(self, root, *commits, ref=None, make=True):
        assert self.hook
        if isinstance(root, list):
            parents = root
            tree = None
        elif root:
            c = self.commit(root)
            tree = c.tree
            parents = [c.id]
        else:
            tree = None
            parents = []

        hashes = []
        for commit in commits:
            if commit.tree:
                if commit.reset:
                    tree = None
                r = self._session.post('https://api.github.com/repos/{}/git/trees'.format(self.name), json={
                    'tree': [
                        {'path': k, 'mode': '100644', 'type': 'blob', 'content': v}
                        for k, v in commit.tree.items()
                    ],
                    'base_tree': tree
                })
                assert r.ok, r.text
                tree = r.json()['sha']

            data = {
                'parents': parents,
                'message': commit.message,
                'tree': tree,
            }
            if commit.author:
                data['author'] = commit.author
            if commit.committer:
                data['committer'] = commit.committer

            r = self._session.post('https://api.github.com/repos/{}/git/commits'.format(self.name), json=data)
            assert r.ok, r.text

            hashes.append(r.json()['sha'])
            parents = [hashes[-1]]

        if ref:
            fn = self.make_ref if make else self.update_ref
            fn(ref, hashes[-1], force=True)

        return hashes
    def fork(self, *, token=None):
        s = self._get_session(token)

        r = s.post('https://api.github.com/repos/{}/forks'.format(self.name))
        assert 200 <= r.status_code < 300, r.text

        repo_name = r.json()['full_name']
        repo_url = 'https://api.github.com/repos/' + repo_name
        # poll for end of fork
        limit = time.time() + 60
        while s.head(repo_url, timeout=5).status_code != 200:
            if time.time() > limit:
                raise TimeoutError("No response for repo %s over 60s" % repo_name)
            time.sleep(1)

        # wait for the branches (which should have been copied over) to be visible
        while not s.get(f'{repo_url}/branches').json():
            if time.time() > limit:
                raise TimeoutError("No response for repo %s over 60s" % repo_name)
            time.sleep(1)

        return Repo(s, repo_name, self._repos)

    def get_pr(self, number):
        # ensure PR exists before returning it
        self._session.head('https://api.github.com/repos/{}/pulls/{}'.format(
            self.name,
            number,
        )).raise_for_status()
        return PR(self, number)
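    # Opens a pull request through the REST API. When no title is provided it
    # is inferred from the head commit's message (first paragraph as title,
    # the rest as body), which only works for heads living in this repo.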
    def make_pr(
            self,
            *,
            title: Optional[str] = None,
            body: Optional[str] = None,
            target: str,
            head: str,
            draft: bool = False,
            token: Optional[str] = None
    ) -> PR:
        assert self.hook
        self.hook = 2

        if title is None:
            assert ":" not in head, \
                "will not auto-infer titles for PRs in a remote repo"
            c = self.commit(head)
            parts = iter(c.message.split('\n\n', 1))
            title = next(parts)
            body = next(parts, None)

        headers = {}
        if token:
            headers['Authorization'] = 'token {}'.format(token)

        # FIXME: change tests which pass a commit id to make_pr & remove this
        if re.match(r'[0-9a-f]{40}', head):
            ref = "temp_trash_because_head_must_be_a_ref_%d" % next(ct)
            self.make_ref('heads/' + ref, head)
            head = ref

        r = self._session.post(
            'https://api.github.com/repos/{}/pulls'.format(self.name),
            json={
                'title': title,
                'body': body,
                'head': head,
                'base': target,
                'draft': draft,
            },
            headers=headers,
        )
        assert 200 <= r.status_code < 300, r.text

        return PR(self, r.json()['number'])

    def post_status(self, ref, status, context='default', **kw):
        assert self.hook
        assert status in ('error', 'failure', 'pending', 'success')
        commit = ref if isinstance(ref, Commit) else self.commit(ref)
        r = self._session.post('https://api.github.com/repos/{}/statuses/{}'.format(self.name, commit.id), json={
            'state': status,
            'context': context,
            **kw
        })
        assert 200 <= r.status_code < 300, r.text

    def is_ancestor(self, sha, of):
        return any(c['sha'] == sha for c in self.log(of))

    def log(self, ref_or_sha):
        for page in itertools.count(1):
            r = self._session.get(
                'https://api.github.com/repos/{}/commits'.format(self.name),
                params={'sha': ref_or_sha, 'page': page}
            )
            assert 200 <= r.status_code < 300, r.text
            yield from r.json()
            if not r.links.get('next'):
                return

    def __enter__(self):
        self.hook = 1
        return self

    def __exit__(self, *args):
        wait_for_hook(self.hook)
        self.hook = 0

    class Commit:
        def __init__(self, message, *, author=None, committer=None, tree, reset=False):
            self.id = None
            self.message = message
            self.author = author
            self.committer = committer
            self.tree = tree
            self.reset = reset

MakeCommit = Repo.Commit
ct = itertools.count()

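# Comment behaves as a (login, body) 2-tuple, which is what most assertions
# compare against, while still exposing the raw GitHub payload through
# string keys (e.g. comment['user']).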
class Comment(tuple):
    def __new__(cls, c):
        self = super(Comment, cls).__new__(cls, (c['user']['login'], c['body']))
        self._c = c
        return self

    def __getitem__(self, item):
        if isinstance(item, int):
            return super().__getitem__(item)
        return self._c[item]

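# GraphQL mutations used to flip a PR in and out of draft mode: the v3 REST
# API apparently cannot update the draft flag (see PR.draft below), so the
# setter goes through the v4 (GraphQL) endpoint with one of these documents.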
PR_SET_READY = '''
mutation setReady($pid: ID!) {
    markPullRequestReadyForReview(input: { pullRequestId: $pid}) {
        clientMutationId
    }
}
'''

PR_SET_DRAFT = '''
mutation setDraft($pid: ID!) {
    convertPullRequestToDraft(input: { pullRequestId: $pid }) {
        clientMutationId
    }
}
'''


def state_prop(name: str) -> property:
    @property
    def _prop(self):
        return self._pr[name]
    return _prop.setter(lambda self, v: self._set_prop(name, v))

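# Lazy proxy for a pull request: attribute accesses re-fetch the PR from the
# API, using conditional requests (ETag / Last-Modified headers) so that a
# 304 response reuses the previously cached payload.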
class PR:
    def __init__(self, repo, number):
        self.repo = repo
        self.number = number
        self.labels = LabelsProxy(self)
        self._cache = None, {}

    @property
    def _pr(self):
        previous, caching = self._cache
        r = self.repo._session.get(
            'https://api.github.com/repos/{}/pulls/{}'.format(self.repo.name, self.number),
            headers=caching
        )
        assert r.ok, r.text
        if r.status_code == 304:
            return previous
        contents, caching = self._cache = r.json(), {}
        if r.headers.get('etag'):
            caching['If-None-Match'] = r.headers['etag']
        if r.headers.get('last-modified'):
            caching['If-Modified-Since'] = r.headers['Last-Modified']
        return contents

    title = state_prop('title')
    body = state_prop('body')
    base = state_prop('base')

    @property
    def draft(self):
        return self._pr['draft']

    @draft.setter
    def draft(self, v):
        assert self.repo.hook
        # apparently it's not possible to update the draft flag via the v3 API,
        # only the V4...
        r = self.repo._session.post('https://api.github.com/graphql', json={
            'query': PR_SET_DRAFT if v else PR_SET_READY,
            'variables': {'pid': self._pr['node_id']}
        })
        assert r.ok, r.text
        out = r.json()
        assert 'errors' not in out, out['errors']

    @property
    def head(self):
        return self._pr['head']['sha']

    @property
    def user(self):
        return self._pr['user']['login']

    @property
    def state(self):
        return self._pr['state']

    @property
    def comments(self):
        r = self.repo._session.get('https://api.github.com/repos/{}/issues/{}/comments'.format(self.repo.name, self.number))
        assert 200 <= r.status_code < 300, r.text
        return [Comment(c) for c in r.json()]

    @property
    def ref(self):
        return 'heads/' + self.branch.branch

    def post_comment(self, body, token=None):
        assert self.repo.hook
        headers = {}
        if token:
            headers['Authorization'] = 'token %s' % token
        r = self.repo._session.post(
            'https://api.github.com/repos/{}/issues/{}/comments'.format(self.repo.name, self.number),
            json={'body': body},
            headers=headers,
        )
        assert 200 <= r.status_code < 300, r.text
        return r.json()['id']

    def edit_comment(self, cid, body, token=None):
        assert self.repo.hook
        headers = {}
        if token:
            headers['Authorization'] = 'token %s' % token
        r = self.repo._session.patch(
            'https://api.github.com/repos/{}/issues/comments/{}'.format(self.repo.name, cid),
            json={'body': body},
            headers=headers
        )
        assert 200 <= r.status_code < 300, r.text
        wait_for_hook()

    def delete_comment(self, cid, token=None):
        assert self.repo.hook
        headers = {}
        if token:
            headers['Authorization'] = 'token %s' % token
        r = self.repo._session.delete(
            'https://api.github.com/repos/{}/issues/comments/{}'.format(self.repo.name, cid),
            headers=headers
        )
        assert r.status_code == 204, r.text
    def _set_prop(self, prop, value, token=None):
        assert self.repo.hook
        headers = {}
        if token:
            headers['Authorization'] = 'token ' + token
        r = self.repo._session.patch('https://api.github.com/repos/{}/pulls/{}'.format(self.repo.name, self.number), json={
            prop: value
        }, headers=headers)
        assert r.ok, r.text

    def open(self, token=None):
        self._set_prop('state', 'open', token=token)

    def close(self, token=None):
        self._set_prop('state', 'closed', token=token)

    @property
    def branch(self):
        r = self.repo._session.get('https://api.github.com/repos/{}/pulls/{}'.format(
            self.repo.name,
            self.number,
        ))
        assert 200 <= r.status_code < 300, r.text
        info = r.json()

        repo = self.repo
        reponame = info['head']['repo']['full_name']
        if reponame != self.repo.name:
            # not sure deep copying the session object is safe / proper...
            repo = Repo(copy.deepcopy(self.repo._session), reponame, [])

        return PRBranch(repo, info['head']['ref'])

    def post_review(self, state, body, token=None):
        assert self.repo.hook
        headers = {}
        if token:
            headers['Authorization'] = 'token %s' % token
        r = self.repo._session.post(
            'https://api.github.com/repos/{}/pulls/{}/reviews'.format(self.repo.name, self.number),
            json={'body': body, 'event': state},
            headers=headers
        )
        assert 200 <= r.status_code < 300, r.text


PRBranch = collections.namedtuple('PRBranch', 'repo branch')

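# Mutable-set view over a PR's labels: reads and writes go straight to the
# issues/labels endpoints, so the set always reflects the server state rather
# than a local copy.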
class LabelsProxy(collections.abc.MutableSet):
    def __init__(self, pr):
        self._pr = pr

    @property
    def _labels(self):
        pr = self._pr
        r = pr.repo._session.get('https://api.github.com/repos/{}/issues/{}/labels'.format(pr.repo.name, pr.number))
        assert r.ok, r.text
        return {label['name'] for label in r.json()}

    def __repr__(self):
        return '<LabelsProxy %r>' % self._labels

    def __eq__(self, other):
        if isinstance(other, collections.abc.Set):
            return other == self._labels
        return NotImplemented

    def __contains__(self, label):
        return label in self._labels

    def __iter__(self):
        return iter(self._labels)

    def __len__(self):
        return len(self._labels)

    def add(self, label):
        pr = self._pr
        assert pr.repo.hook
        r = pr.repo._session.post('https://api.github.com/repos/{}/issues/{}/labels'.format(pr.repo.name, pr.number), json={
            'labels': [label]
        })
        assert r.ok, r.text

    def discard(self, label):
        pr = self._pr
        assert pr.repo.hook
        r = pr.repo._session.delete('https://api.github.com/repos/{}/issues/{}/labels/{}'.format(pr.repo.name, pr.number, label))
        # discard should do nothing if the item didn't exist in the set
        assert r.ok or r.status_code == 404, r.text

    def update(self, *others):
        pr = self._pr
        assert pr.repo.hook
        # because of course that one is not provided by MutableSet...
        r = pr.repo._session.post('https://api.github.com/repos/{}/issues/{}/labels'.format(pr.repo.name, pr.number), json={
            'labels': list(set(itertools.chain.from_iterable(others)))
        })
        assert r.ok, r.text

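# Thin wrapper around the Odoo instance's XML-RPC endpoints: calling the
# environment invokes execute_kw as the admin user, and indexing it returns a
# Model proxy. A minimal usage sketch (the port, database name and cron xmlid
# are hypothetical):
#
#     env = Environment(8069, 'test-db', default_crons=['some_module.some_cron'])
#     partners = env['res.partner'].search([])
#     env.run_crons()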
class Environment:
    def __init__(self, port, db, default_crons=()):
        self._uid = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/common'.format(port)).authenticate(db, 'admin', 'admin', {})
        self._object = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/object'.format(port))
        self._db = db
        self._default_crons = default_crons

    def __call__(self, model, method, *args, **kwargs):
        return self._object.execute_kw(
            self._db, self._uid, 'admin',
            model, method,
            args, kwargs
        )

    def __getitem__(self, name):
        return Model(self, name)

    def ref(self, xid, raise_if_not_found=True):
        model, obj_id = self(
            'ir.model.data', 'check_object_reference',
            *xid.split('.', 1),
            raise_on_access_error=raise_if_not_found
        )
        return Model(self, model, [obj_id]) if obj_id else None

    def run_crons(self, *xids, **kw):
        crons = xids or self._default_crons
        print('running crons', crons, file=sys.stderr)
        for xid in crons:
            t0 = time.time()
            print('\trunning cron', xid, '...', file=sys.stderr)
            model, cron_id = self('ir.model.data', 'check_object_reference', *xid.split('.', 1))
            assert model == 'ir.cron', "Expected {} to be a cron, got {}".format(xid, model)
            self('ir.cron', 'method_direct_trigger', [cron_id], **kw)
            print('\tdone %.3fs' % (time.time() - t0), file=sys.stderr)
        print('done', file=sys.stderr)
        # sleep for some time as a lot of crap may have happened (?)
        wait_for_hook()

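# Minimal recordset-like proxy over XML-RPC: attribute reads become `read`
# calls (relational fields are re-wrapped as Model instances), attribute
# writes become `write` calls, and unknown attributes fall through to generic
# method calls via _call. A hedged usage sketch (model and field names are
# placeholders):
#
#     records = env['some.model'].search([('state', '=', 'open')])
#     for record in records:
#         print(record.id, record.name)
#     records.write({'state': 'done'})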
class Model:
    __slots__ = ['env', '_name', '_ids', '_fields']

    def __init__(self, env, model, ids=(), fields=None):
        object.__setattr__(self, 'env', env)
        object.__setattr__(self, '_name', model)
        object.__setattr__(self, '_ids', tuple(ids or ()))

        object.__setattr__(self, '_fields', fields or self.env(self._name, 'fields_get', attributes=['type', 'relation']))

    @property
    def ids(self):
        return self._ids

    @property
    def _env(self): return self.env

    @property
    def _model(self): return self._name

    def __bool__(self):
        return bool(self._ids)

    def __len__(self):
        return len(self._ids)

    def __hash__(self):
        return hash((self._model, frozenset(self._ids)))

    def __eq__(self, other):
        if not isinstance(other, Model):
            return NotImplemented
        return self._model == other._model and set(self._ids) == set(other._ids)

    def __repr__(self):
        return "{}({})".format(self._model, ', '.join(str(id_) for id_ in self._ids))

    # method: (model, rebrowse)
    _conf = {
        'check_object_reference': (True, False),
        'create': (True, True),
        'exists': (False, True),
        'fields_get': (True, False),
        'name_create': (False, True),
        'name_search': (True, False),
        'search': (True, True),
        'search_count': (True, False),
        'search_read': (True, False),
        'filtered': (False, True),
    }

    def browse(self, ids):
        return Model(self._env, self._model, ids)

    # because sorted is not xmlrpc-compatible (it doesn't downgrade properly)
    def sorted(self, field):
        fn = field if callable(field) else lambda r: r[field]

        return Model(self._env, self._model, (
            id
            for record in sorted(self, key=fn)
            for id in record.ids
        ))

    def __getitem__(self, index):
        if isinstance(index, str):
            return getattr(self, index)
        ids = self._ids[index]
        if isinstance(ids, int):
            ids = [ids]

        return Model(self._env, self._model, ids, fields=self._fields)

    def __getattr__(self, fieldname):
        if fieldname in ['__dataclass_fields__', '__attrs_attrs__']:
            raise AttributeError('%r is invalid on %s' % (fieldname, self._model))

        field_description = self._fields.get(fieldname)
        if field_description is None:
            return functools.partial(self._call, fieldname)

        if not self._ids:
            return False

        if fieldname == 'id':
            return self._ids[0]

        val = self.read([fieldname])[0][fieldname]
        field_description = self._fields[fieldname]
        if field_description['type'] in ('many2one', 'one2many', 'many2many'):
            val = val or []
            if field_description['type'] == 'many2one':
                val = val[:1]  # (id, name) => [id]
            return Model(self._env, field_description['relation'], val)

        return val
    # because it's difficult to discriminate between methods and fields
    def _call(self, name, *args, **kwargs):
        model, rebrowse = self._conf.get(name, (False, False))

        if model:
            res = self._env(self._model, name, *args, **kwargs)
        else:
            res = self._env(self._model, name, self._ids, *args, **kwargs)

        if not rebrowse:
            return res
        if isinstance(res, int):
            return self.browse([res])
        return self.browse(res)

    def __setattr__(self, fieldname, value):
        self._env(self._model, 'write', self._ids, {fieldname: value})

    def __iter__(self):
        return (
            Model(self._env, self._model, [i], fields=self._fields)
            for i in self._ids
        )

    def mapped(self, path):
        field, *rest = path.split('.', 1)
        descr = self._fields[field]
        if descr['type'] in ('many2one', 'one2many', 'many2many'):
            result = Model(self._env, descr['relation'])
            for record in self:
                result |= getattr(record, field)

            return result.mapped(rest[0]) if rest else result

        assert not rest
        return [getattr(r, field) for r in self]

    def filtered(self, fn):
        result = Model(self._env, self._model, fields=self._fields)
        for record in self:
            if fn(record):
                result |= record
        return result

    def __sub__(self, other):
        if not isinstance(other, Model) or self._model != other._model:
            return NotImplemented

        return Model(self._env, self._model, tuple(id_ for id_ in self._ids if id_ not in other._ids), fields=self._fields)

    def __or__(self, other):
        if not isinstance(other, Model) or self._model != other._model:
            return NotImplemented

        return Model(
            self._env, self._model,
            self._ids + tuple(id_ for id_ in other.ids if id_ not in self._ids),
            fields=self._fields
        )
    __add__ = __or__

    def __and__(self, other):
        if not isinstance(other, Model) or self._model != other._model:
            return NotImplemented

        return Model(self._env, self._model, tuple(id_ for id_ in self._ids if id_ in other._ids), fields=self._fields)

    def invalidate_cache(self, fnames=None, ids=None):
        pass  # not a concern when every access is an RPC call