diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a40995a1c5..5b95cecfc9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,12 +12,15 @@ FIXME: Compare shows single commit. SEE https://github.com/ITISFoundation/osparc ### Added - Started this *human-readable* CHANGELOG -- ``migration`` service that discovers postgres service and upgrades main database (#1714) -- Every group can register official classifiers for studies and services. Diplayed as a tree in UI (#1670, #1719, #1722) +- ``migration`` service that discovers postgres service and upgrades main database [#1714](https://github.com/ITISFoundation/osparc-simcore/pull/1714) +- Every group can register official classifiers for studies and services. Displayed as a tree in UI [#1670](https://github.com/ITISFoundation/osparc-simcore/pull/1670), [#1719](https://github.com/ITISFoundation/osparc-simcore/pull/1719), [#1722](https://github.com/ITISFoundation/osparc-simcore/pull/1722) +- GC tests are run in isolation with a template database [#1724](https://github.com/ITISFoundation/osparc-simcore/pull/1724) ### Changed -- Speedup testing by splitting webserver (#1711) +- Speedup testing by splitting webserver [#1711](https://github.com/ITISFoundation/osparc-simcore/pull/1711) +- Refactored garbage collector and added tests [#1724](https://github.com/ITISFoundation/osparc-simcore/pull/1724) +- Logs are now displayed during testing [#1724](https://github.com/ITISFoundation/osparc-simcore/pull/1724) @@ -28,45 +31,45 @@ FIXME: Compare shows single commit. 
SEE https://github.com/ITISFoundation/osparc ## [0.0.25] - 2020-08-04 ### Added -- add traefik endpoint to api-gateway (#1555) -- Shared project concurrency (frontend) (#1591) -- Homogenize studies and services (#1569) +- add traefik endpoint to api-gateway [#1555](https://github.com/ITISFoundation/osparc-simcore/pull/1555) +- Shared project concurrency (frontend) [#1591](https://github.com/ITISFoundation/osparc-simcore/pull/1591) +- Homogenize studies and services [#1569](https://github.com/ITISFoundation/osparc-simcore/pull/1569) - UI Fine grained access - project locking and notification -- Adds support for GPU scheduling of computational services (#1553) +- Adds support for GPU scheduling of computational services [#1553](https://github.com/ITISFoundation/osparc-simcore/pull/1553) ### Changed -- UI/UX improvements (#1657) -- Improving storage performance (#1659) -- Theming (#1656) -- Reduce cardinality of metrics (#1593) +- UI/UX improvements [#1657](https://github.com/ITISFoundation/osparc-simcore/pull/1657) +- Improving storage performance [#1659](https://github.com/ITISFoundation/osparc-simcore/pull/1659) +- Theming [#1656](https://github.com/ITISFoundation/osparc-simcore/pull/1656) +- Reduce cardinality of metrics [#1593](https://github.com/ITISFoundation/osparc-simcore/pull/1593) ### Fixed -- Platform stability: (#1645) -- Fix, improves and re-activate e2e CI testing (#1594, #1620, #1631, #1600) -- Fixes defaults (#1640) -- Upgrade storage service (#1585, #1586) -- UPgrade catalog service (#1582) -- Fixes on publish studies handling (#1632) -- Invalidate cache before starting a study (#1602) -- Some enhancements and bug fixes (#1608) -- filter studies by name before deleting them (#1629) -- Bugfix/apiserver does not need sslheaders (#1564) -- fix testing if node has gpu support (#1604) -- /study fails 500 (#1570, #1572) -- fix codecov reports (#1568) +- Platform stability: [#1645](https://github.com/ITISFoundation/osparc-simcore/pull/1645) +- Fix, improves 
and re-activate e2e CI testing [#1594](https://github.com/ITISFoundation/osparc-simcore/pull/1594), [#1620](https://github.com/ITISFoundation/osparc-simcore/pull/1620), [#1631](https://github.com/ITISFoundation/osparc-simcore/pull/1631), [#1600](https://github.com/ITISFoundation/osparc-simcore/pull/1600) +- Fixes defaults [#1640](https://github.com/ITISFoundation/osparc-simcore/pull/1640) +- Upgrade storage service [#1585](https://github.com/ITISFoundation/osparc-simcore/pull/1585), [#1586](https://github.com/ITISFoundation/osparc-simcore/pull/1586) +- UPgrade catalog service [#1582](https://github.com/ITISFoundation/osparc-simcore/pull/1582) +- Fixes on publish studies handling [#1632](https://github.com/ITISFoundation/osparc-simcore/pull/1632) +- Invalidate cache before starting a study [#1602](https://github.com/ITISFoundation/osparc-simcore/pull/1602) +- Some enhancements and bug fixes [#1608](https://github.com/ITISFoundation/osparc-simcore/pull/1608) +- filter studies by name before deleting them [#1629](https://github.com/ITISFoundation/osparc-simcore/pull/1629) +- Bugfix/apiserver does not need sslheaders [#1564](https://github.com/ITISFoundation/osparc-simcore/pull/1564) +- fix testing if node has gpu support [#1604](https://github.com/ITISFoundation/osparc-simcore/pull/1604) +- /study fails 500 [#1570](https://github.com/ITISFoundation/osparc-simcore/pull/1570), [#1572](https://github.com/ITISFoundation/osparc-simcore/pull/1572) +- fix codecov reports [#1568](https://github.com/ITISFoundation/osparc-simcore/pull/1568) ### Security -- Bump yarl from 1.4.2 to 1.5.1 in /packages/postgres-database (#1665) -- Bump ujson from 3.0.0 to 3.1.0 in /packages/service-library (#1664) -- Bump pytest-docker from 0.7.2 to 0.8.0 in /packages/service-library (#1647) -- Bump aiozipkin from 0.6.0 to 0.7.0 in /packages/service-library (#1642) -- Bump lodash from 4.17.15 to 4.17.19 (#1639) -- Maintenance/upgrades test tools (#1628) -- Bugfix/concurent opening projects (#1598) 
-- Bugfix/allow reading groups anonymous user (#1615) -- Bump docker from 4.2.1 to 4.2.2 in /packages/postgres-database (#1605) -- Bump faker from 4.1.0 to 4.1.1 in /packages/postgres-database (#1573) -- Maintenance/upgrades and tooling (#1546) +- Bump yarl from 1.4.2 to 1.5.1 in /packages/postgres-database [#1665](https://github.com/ITISFoundation/osparc-simcore/pull/1665) +- Bump ujson from 3.0.0 to 3.1.0 in /packages/service-library [#1664](https://github.com/ITISFoundation/osparc-simcore/pull/1664) +- Bump pytest-docker from 0.7.2 to 0.8.0 in /packages/service-library [#1647](https://github.com/ITISFoundation/osparc-simcore/pull/1647) +- Bump aiozipkin from 0.6.0 to 0.7.0 in /packages/service-library [#1642](https://github.com/ITISFoundation/osparc-simcore/pull/1642) +- Bump lodash from 4.17.15 to 4.17.19 [#1639](https://github.com/ITISFoundation/osparc-simcore/pull/1639) +- Maintenance/upgrades test tools [#1628](https://github.com/ITISFoundation/osparc-simcore/pull/1628) +- Bugfix/concurent opening projects [#1598](https://github.com/ITISFoundation/osparc-simcore/pull/1598) +- Bugfix/allow reading groups anonymous user [#1615](https://github.com/ITISFoundation/osparc-simcore/pull/1615) +- Bump docker from 4.2.1 to 4.2.2 in /packages/postgres-database [#1605](https://github.com/ITISFoundation/osparc-simcore/pull/1605) +- Bump faker from 4.1.0 to 4.1.1 in /packages/postgres-database [#1573](https://github.com/ITISFoundation/osparc-simcore/pull/1573) +- Maintenance/upgrades and tooling [#1546](https://github.com/ITISFoundation/osparc-simcore/pull/1546) --- diff --git a/packages/postgres-database/src/simcore_postgres_database/cli.py b/packages/postgres-database/src/simcore_postgres_database/cli.py index b0ee1217c09..aa85a3f2f4b 100644 --- a/packages/postgres-database/src/simcore_postgres_database/cli.py +++ b/packages/postgres-database/src/simcore_postgres_database/cli.py @@ -37,7 +37,11 @@ discovered_cache = 
os.path.expanduser("~/.simcore_postgres_database_cache.json") log = logging.getLogger("root") -fileConfig(default_ini) + +if __name__ == "__main__": + # swallows up all log messages from tests + # only enable it during cli invocation + fileConfig(default_ini) def safe(if_fails_return=False): diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/env.py b/packages/postgres-database/src/simcore_postgres_database/migration/env.py index 6836d5ae008..b4ed815e55a 100644 --- a/packages/postgres-database/src/simcore_postgres_database/migration/env.py +++ b/packages/postgres-database/src/simcore_postgres_database/migration/env.py @@ -1,4 +1,3 @@ - from logging.config import fileConfig from alembic import context @@ -12,7 +11,11 @@ # Interpret the config file for Python logging. # This line sets up loggers basically. -fileConfig(config.config_file_name) + +if __name__ == "__main__": + # swallows up all log messages from tests + # only enable it during cli invocation + fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support @@ -38,9 +41,7 @@ def run_migrations_offline(): """ # pylint: disable=no-member url = config.get_main_option("sqlalchemy.url") - context.configure( - url=url, target_metadata=target_metadata, literal_binds=True - ) + context.configure(url=url, target_metadata=target_metadata, literal_binds=True) with context.begin_transaction(): context.run_migrations() @@ -61,9 +62,7 @@ def run_migrations_online(): ) with connectable.connect() as connection: - context.configure( - connection=connection, target_metadata=target_metadata - ) + context.configure(connection=connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_projects.py b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_projects.py index 870d61eaec8..8290c2e4540 100644 --- 
a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_projects.py +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_projects.py @@ -10,6 +10,7 @@ import re import uuid as uuidlib from typing import Dict +from simcore_service_webserver.utils import now_str from aiohttp import web @@ -32,6 +33,19 @@ ] +def empty_project_data(): + return { + "uuid": f"project-{uuidlib.uuid4()}", + "name": "Empty name", + "description": "some description of an empty project", + "prjOwner": "I'm the empty project owner, hi!", + "creationDate": now_str(), + "lastChangeDate": now_str(), + "thumbnail": "", + "workbench": {}, + } + + def load_data(name): with resources.stream(name) as fp: return json.load(fp) @@ -62,7 +76,7 @@ async def create_project( try: uuidlib.UUID(project_data["uuid"]) assert new_project["uuid"] == project_data["uuid"] - except ValueError: + except (ValueError, AssertionError): # in that case the uuid gets replaced assert new_project["uuid"] != project_data["uuid"] project_data["uuid"] = new_project["uuid"] @@ -90,7 +104,7 @@ def __init__( clear_all=True, user_id=None, *, - force_uuid=False + force_uuid=False, ): self.params = params self.user_id = user_id diff --git a/packages/pytest-simcore/src/pytest_simcore/postgres_service.py b/packages/pytest-simcore/src/pytest_simcore/postgres_service.py index 0b4f955bfac..585b1d4cc22 100644 --- a/packages/pytest-simcore/src/pytest_simcore/postgres_service.py +++ b/packages/pytest-simcore/src/pytest_simcore/postgres_service.py @@ -2,22 +2,134 @@ # pylint:disable=unused-argument # pylint:disable=redefined-outer-name -import os -from typing import Dict import logging +import asyncio +import os +from typing import Dict, List import pytest +import simcore_postgres_database.cli as pg_cli import sqlalchemy as sa import tenacity -from sqlalchemy.orm import sessionmaker - -import simcore_postgres_database.cli as pg_cli from simcore_postgres_database.models.base import metadata +from sqlalchemy.orm import sessionmaker 
from .helpers.utils_docker import get_service_published_port log = logging.getLogger(__name__) +TEMPLATE_DB_TO_RESTORE = "template_simcore_db" + + +def execute_queries( + postgres_engine: sa.engine.Engine, + sql_statements: List[str], + ignore_errors: bool = False, +) -> None: + """runs the queries in the list in order""" + with postgres_engine.connect() as con: + for statement in sql_statements: + try: + con.execution_options(autocommit=True).execute(statement) + except Exception as e: # pylint: disable=broad-except + # when running tests initially the TEMPLATE_DB_TO_RESTORE does not exist and will cause an error + # which can safely be ignored. The debug message is here to catch future errors and + # avoid wasting time + log.debug("SQL error which can be ignored %s", str(e)) + + +def create_template_db(postgres_dsn: Dict, postgres_engine: sa.engine.Engine) -> None: + # create a template db, the removal is necessary to allow for the usage of --keep-docker-up + queries = [ + # disconnect existing users + f""" + SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity + WHERE pg_stat_activity.datname = '{postgres_dsn["database"]}' AND pid <> pg_backend_pid(); + """, + # drop template database + f"ALTER DATABASE {TEMPLATE_DB_TO_RESTORE} is_template false;", + f"DROP DATABASE {TEMPLATE_DB_TO_RESTORE};", + # create template database + """ + CREATE DATABASE {template_db} WITH TEMPLATE {original_db} OWNER {db_user}; + """.format( + template_db=TEMPLATE_DB_TO_RESTORE, + original_db=postgres_dsn["database"], + db_user=postgres_dsn["user"], + ), + ] + execute_queries(postgres_engine, queries, ignore_errors=True) + + +def drop_template_db(postgres_engine: sa.engine.Engine) -> None: + # remove the template db + queries = [ + # drop template database + f"ALTER DATABASE {TEMPLATE_DB_TO_RESTORE} is_template false;", + f"DROP DATABASE {TEMPLATE_DB_TO_RESTORE};", + ] + execute_queries(postgres_engine, queries) + + +@pytest.fixture(scope="module") +def 
loop(request): + loop = asyncio.get_event_loop_policy().new_event_loop() + yield loop + loop.close() + + +@pytest.fixture(scope="module") +def postgres_with_template_db( + postgres_db: sa.engine.Engine, postgres_dsn: Dict, postgres_engine: sa.engine.Engine +) -> sa.engine.Engine: + create_template_db(postgres_dsn, postgres_engine) + yield postgres_engine + drop_template_db(postgres_engine) + + +@pytest.fixture +def drop_db_engine(postgres_dsn: Dict) -> sa.engine.Engine: + postgres_dsn_copy = postgres_dsn.copy() # make a copy to change these parameters + postgres_dsn_copy["database"] = "postgres" + dsn = "postgresql://{user}:{password}@{host}:{port}/{database}".format( + **postgres_dsn_copy + ) + return sa.create_engine(dsn, isolation_level="AUTOCOMMIT") + + +@pytest.fixture +def database_from_template_before_each_function( + postgres_dsn: Dict, drop_db_engine: sa.engine.Engine, postgres_db +) -> None: + """ + Will recreate the db before running each test. + + **Note: must be implemented in the module where the + `postgres_with_template_db` is used and mark autouse=True** + + It is possible to drop the application database by using another one like + the postgres database. The db will be recreated from the previously created template + + The postgres_db fixture is required for the template database to be created. 
+ """ + + queries = [ + # terminate existing connections to the database + f""" + SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity + WHERE pg_stat_activity.datname = '{postgres_dsn["database"]}'; + """, + # drop database + f"DROP DATABASE {postgres_dsn['database']};", + # create from template database + f"CREATE DATABASE {postgres_dsn['database']} TEMPLATE template_simcore_db;", + ] + + execute_queries(drop_db_engine, queries) + + yield + # do nothing on teardown + @pytest.fixture(scope="module") def postgres_dsn(docker_stack: Dict, devel_environ: Dict) -> Dict[str, str]: diff --git a/packages/pytest-simcore/src/pytest_simcore/websocket_client.py b/packages/pytest-simcore/src/pytest_simcore/websocket_client.py index a9f5927d241..78c55dc6cb7 100644 --- a/packages/pytest-simcore/src/pytest_simcore/websocket_client.py +++ b/packages/pytest-simcore/src/pytest_simcore/websocket_client.py @@ -5,50 +5,60 @@ import pytest import socketio from yarl import URL +from aiohttp import web +from pytest_simcore.helpers.utils_assert import assert_status +from typing import Callable, Optional -from servicelib.rest_responses import unwrap_envelope +@pytest.fixture +def socketio_url(client) -> Callable: + def create_url(client_override: Optional = None) -> str: + SOCKET_IO_PATH = "/socket.io/" + return str((client_override or client).make_url(SOCKET_IO_PATH)) -@pytest.fixture() -async def security_cookie_factory(loop, client) -> str: - # get the cookie by calling the root entrypoint - resp = await client.get("/v0/") - payload = await resp.json() - assert resp.status == 200, str(payload) - data, error = unwrap_envelope(payload) - assert data - assert not error + yield create_url - cookie = "" - if "Cookie" in resp.request_info.headers: - cookie = resp.request_info.headers["Cookie"] - yield cookie + +@pytest.fixture +async def security_cookie_factory(client) -> Callable: + async def creator(client_override: Optional = None) -> str: + # get the cookie by calling the 
root entrypoint + resp = await (client_override or client).get("/v0/") + data, error = await assert_status(resp, web.HTTPOk) + assert data + assert not error -@pytest.fixture() -async def socketio_url(loop, client) -> str: - SOCKET_IO_PATH = "/socket.io/" - return str(client.make_url(SOCKET_IO_PATH)) + cookie = ( + resp.request_info.headers["Cookie"] + if "Cookie" in resp.request_info.headers + else "" + ) + return cookie + + yield creator -@pytest.fixture() +@pytest.fixture async def socketio_client( - socketio_url: str, security_cookie_factory: str -) -> socketio.AsyncClient: + socketio_url: Callable, security_cookie_factory: Callable +) -> Callable: clients = [] - async def connect(client_session_id) -> socketio.AsyncClient: - sio = socketio.AsyncClient( - ssl_verify=False - ) # enginio 3.10.0 introduced ssl verification + async def connect( + client_session_id: str, client: Optional = None + ) -> socketio.AsyncClient: + sio = socketio.AsyncClient(ssl_verify=False) + # enginio 3.10.0 introduced ssl verification url = str( - URL(socketio_url).with_query({"client_session_id": client_session_id}) + URL(socketio_url(client)).with_query( + {"client_session_id": client_session_id} + ) ) - headers = {} - if security_cookie_factory: + cookie = await security_cookie_factory(client) + if cookie: # WARNING: engineio fails with empty cookies. 
Expects "key=value" - headers.update({"Cookie": security_cookie_factory}) + headers.update({"Cookie": cookie}) await sio.connect(url, headers=headers) assert sio.sid diff --git a/services/web/server/src/simcore_service_webserver/groups_api.py b/services/web/server/src/simcore_service_webserver/groups_api.py index 9b0ff359896..aa60b31bccd 100644 --- a/services/web/server/src/simcore_service_webserver/groups_api.py +++ b/services/web/server/src/simcore_service_webserver/groups_api.py @@ -351,3 +351,12 @@ async def get_group_classifier(app: web.Application, gid: int) -> Dict: ) ) return bundle or {} + + +async def get_group_from_gid(app: web.Application, gid: int) -> Dict: + engine = app[APP_DB_ENGINE_KEY] + async with engine.acquire() as conn: + group = await conn.execute( + sa.select([groups]).where(groups.c.gid == gid) + ) + return await group.fetchone() diff --git a/services/web/server/src/simcore_service_webserver/projects/projects_db.py b/services/web/server/src/simcore_service_webserver/projects/projects_db.py index c2009144a01..1ce7655e8fb 100644 --- a/services/web/server/src/simcore_service_webserver/projects/projects_db.py +++ b/services/web/server/src/simcore_service_webserver/projects/projects_db.py @@ -594,6 +594,23 @@ async def list_all_projects_by_uuid_for_user(self, user_id: int) -> List[str]: result.append(row[0]) return list(result) + async def update_project_without_enforcing_checks( + self, project_data: Dict, project_uuid: str + ) -> bool: + """The garbage collector needs to alter the row without passing through the + permissions layer.""" + async with self.engine.acquire() as conn: + # update timestamps + project_data["lastChangeDate"] = now_str() + # now update it + result = await conn.execute( + # pylint: disable=no-value-for-parameter + projects.update() + .values(**_convert_to_db_names(project_data)) + .where(projects.c.uuid == project_uuid) + ) + return result.rowcount == 1 + def setup_projects_db(app: web.Application): db = 
ProjectDBAPI(app) diff --git a/services/web/server/src/simcore_service_webserver/resource_manager/__init__.py b/services/web/server/src/simcore_service_webserver/resource_manager/__init__.py index 8f0c6fb8754..a3a6eca8e6d 100644 --- a/services/web/server/src/simcore_service_webserver/resource_manager/__init__.py +++ b/services/web/server/src/simcore_service_webserver/resource_manager/__init__.py @@ -18,7 +18,7 @@ APP_RESOURCE_MANAGER_TASKS_KEY, CONFIG_SECTION_NAME, ) -from .garbage_collector import setup as setup_garbage_collector +from .garbage_collector import setup_garbage_collector from .redis import setup_redis_client from .registry import RedisResourceRegistry diff --git a/services/web/server/src/simcore_service_webserver/resource_manager/garbage_collector.py b/services/web/server/src/simcore_service_webserver/resource_manager/garbage_collector.py index 266fe5d2e2a..43e9aa60d14 100644 --- a/services/web/server/src/simcore_service_webserver/resource_manager/garbage_collector.py +++ b/services/web/server/src/simcore_service_webserver/resource_manager/garbage_collector.py @@ -1,18 +1,11 @@ -"""The garbage collector runs as an aiohttp background task at pre-defined interval until the aiohttp app is closed. - - Its tasks are to collect resources that are no longer "alive". - The tasks are defined as alive when the registry alive key is no longer available (see (registry.py)), - thus the corresponding key is deamed as dead, and so are its attached resources if any. - The garbage collector shall then close/delete these resources. 
-""" - import asyncio import logging from itertools import chain -from typing import Dict, List +from typing import Dict from aiohttp import web +from aiopg.sa.result import RowProxy from servicelib.observer import emit from servicelib.utils import logged_gather from simcore_service_webserver.director.director_api import ( @@ -27,14 +20,23 @@ delete_project_from_db, get_workbench_node_ids_from_project_uuid, is_node_id_present_in_any_project_workbench, + get_project_for_user, +) +from simcore_service_webserver.projects.projects_db import ( + APP_PROJECT_DBAPI, + ProjectAccessRights, ) -from simcore_service_webserver.projects.projects_db import APP_PROJECT_DBAPI from simcore_service_webserver.projects.projects_exceptions import ProjectNotFoundError from simcore_service_webserver.users_api import ( delete_user, get_guest_user_ids, is_user_guest, ) +from simcore_service_webserver import users_exceptions +from simcore_service_webserver.users_api import get_user, get_user_id_from_gid +from simcore_service_webserver.users_to_groups_api import get_users_for_gid +from simcore_service_webserver.groups_api import get_group_from_gid +from simcore_service_webserver.db_models import GroupType from .config import APP_GARBAGE_COLLECTOR_KEY, get_garbage_collector_interval from .registry import RedisResourceRegistry, get_registry @@ -42,39 +44,127 @@ logger = logging.getLogger(__name__) +async def setup_garbage_collector_task(app: web.Application): + app[APP_GARBAGE_COLLECTOR_KEY] = app.loop.create_task(garbage_collector_task(app)) + yield + task = app[APP_GARBAGE_COLLECTOR_KEY] + task.cancel() + await task + + +def setup_garbage_collector(app: web.Application): + app.cleanup_ctx.append(setup_garbage_collector_task) + + +async def garbage_collector_task(app: web.Application): + keep_alive = True + + while keep_alive: + logger.info("Starting garbage collector...") + try: + registry = get_registry(app) + interval = get_garbage_collector_interval(app) + while True: + await 
collect_garbage(registry, app) + await asyncio.sleep(interval) + + except asyncio.CancelledError: + keep_alive = False + logger.info("Garbage collection task was cancelled, it will not restart!") + except Exception: # pylint: disable=broad-except + logger.warning( + "There was an error during garbage collection, restarting...", + exc_info=True, + ) + # will wait 5 seconds before restarting to avoid restart loops + await asyncio.sleep(5) + + async def collect_garbage(registry: RedisResourceRegistry, app: web.Application): + """ + Garbage collection has the task of removing trash from the system. The trash + can be divided in: + + - Websockets & Redis (used to keep track of current active connections) + - GUEST users (used for temporary access to the system which are created on the fly) + - deletion of users. If a user needs to be deleted it is manually marked as GUEST + in the database + + The resources are Redis entries where all information regarding all the + websocket identifiers for all opened tabs across all browsers for each user + are stored. + + The alive/dead keys are normal Redis keys. To each key an ALIVE key is associated, + which has an assigned TTL. The browser will call the `client_heartbeat` websocket + endpoint to refresh the TTL, thus declaring that the user (websocket connection) is + still active. The `resource_deletion_timeout_seconds` is the TTL of the key. + + The field `garbage_collection_interval_seconds` defines the interval at which this + function will be called. 
+ """ logger.info("collecting garbage...") + + # Removes disconnected user resources + # Triggers signal to close possible pending opened projects + # Removes disconnected GUEST users after they finished their sessions + await remove_disconnected_user_resources(registry, app) + + # Users manually marked for removal: + # if a user was manually marked as GUEST it needs to be + # removed together with all the associated projects + await remove_users_manually_marked_as_guests(registry, app) + + # For various reasons, some services remain pending after + # the projects are closed or the user was disconnected. + # This will close and remove all these services from + # the cluster, thus freeing important resources. + await remove_orphaned_services(registry, app) + + +async def remove_disconnected_user_resources( + registry: RedisResourceRegistry, app: web.Application +) -> None: + # alive_keys = currently "active" users + # dead_keys = users considered as "inactive" + # these keys hold references to more than one websocket connection ids + # the websocket ids are referred to as resources alive_keys, dead_keys = await registry.get_all_resource_keys() logger.debug("potential dead keys: %s", dead_keys) - # check if we find potential stuff to close + # clean up all the websocket ids for the disconnected user for dead_key in dead_keys: - dead_resources = await registry.get_resources(dead_key) - if not dead_resources: - # no resource, remove the key then + dead_key_resources = await registry.get_resources(dead_key) + if not dead_key_resources: + # no websocket associated with this user, just removing key await registry.remove_key(dead_key) continue - logger.debug("found the following resources: %s", dead_resources) - # find if there are alive entries using these resources - for resource_name, resource_value in dead_resources.items(): + + logger.debug("Dead key '%s' resources: '%s'", dead_key, dead_key_resources) + + # removing all websocket references for the disconnected 
user + for resource_name, resource_value in dead_key_resources.items(): + # list of other websocket references to be removed other_keys = [ x for x in await registry.find_keys((resource_name, resource_value)) if x != dead_key ] - # the resource ref can be closed anyway - logger.debug("removing resource entry: %s: %s", dead_key, dead_resources) + + # it is safe to remove the current websocket entry for this user + logger.debug("removing resource '%s' for '%s' key", resource_name, dead_key) await registry.remove_resource(dead_key, resource_name) # check if the resource is still in use in the alive keys if not any(elem in alive_keys for elem in other_keys): - # remove the resource from the other keys as well + # remove the remaining websocket entries remove_tasks = [ registry.remove_resource(x, resource_name) for x in other_keys ] if remove_tasks: logger.debug( - "removing resource entry: %s: %s", other_keys, dead_resources + "removing resource entry: %s: %s", + other_keys, + dead_key_resources, ) await logged_gather(*remove_tasks, reraise=False) @@ -84,6 +174,7 @@ async def collect_garbage(registry: RedisResourceRegistry, app: web.Application) resource_value, dead_key, ) + # inform that the project can be closed on the backend side await emit( event="SIGNAL_PROJECT_CLOSE", user_id=None, @@ -91,30 +182,21 @@ async def collect_garbage(registry: RedisResourceRegistry, app: web.Application) app=app, ) - await remove_resources_if_guest_user( - app=app, - project_uuid=resource_value, - user_id=int(dead_key["user_id"]), + # if this user was a GUEST also remove it from the database + # with the only associated project owned + await remove_guest_user_with_all_its_resources( + app=app, user_id=int(dead_key["user_id"]), ) - # try to remove users which were marked as GUESTS manually - await remove_users_manually_marked_as_guests( - app=app, alive_keys=alive_keys, dead_keys=dead_keys - ) - - # remove possible pending contianers - await remove_orphaned_services(registry, app) - 
async def remove_users_manually_marked_as_guests( - app: web.Application, - alive_keys: List[Dict[str, str]], - dead_keys: List[Dict[str, str]], + registry: RedisResourceRegistry, app: web.Application ) -> None: """ Removes all the projects associated with GUEST users in the system. If the user defined a TEMPLATE, this one also gets removed. """ + alive_keys, dead_keys = await registry.get_all_resource_keys() user_ids_to_ignore = set() for entry in chain(alive_keys, dead_keys): @@ -130,21 +212,10 @@ async def remove_users_manually_marked_as_guests( guest_user_id, ) continue - logger.info("Will try to remove resources for guest '%s'", guest_user_id) - # get all projects for this user and then remove with remove_resources_if_guest_user - user_project_uuids = await app[APP_PROJECT_DBAPI].list_all_projects_by_uuid_for_user( - user_id=guest_user_id - ) - logger.info( - "Project uuids, to clean, for user '%s': '%s'", - guest_user_id, - user_project_uuids, - ) - for project_uuid in user_project_uuids: - await remove_resources_if_guest_user( - app=app, project_uuid=project_uuid, user_id=guest_user_id, - ) + await remove_guest_user_with_all_its_resources( + app=app, user_id=guest_user_id, + ) async def remove_orphaned_services( @@ -192,66 +263,237 @@ async def remove_orphaned_services( logger.info("Finished orphaned services removal") -async def remove_resources_if_guest_user( - app: web.Application, project_uuid: str, user_id: int +async def remove_guest_user_with_all_its_resources( + app: web.Application, user_id: int ) -> None: - """When a guest user finishes using the platform its Posgtres - and S3/MinIO entries need to be removed - """ + """Removes a GUEST user with all its associated projects and S3/MinIO files""" logger.debug("Will try to remove resources for user '%s' if GUEST", user_id) if not await is_user_guest(app, user_id): - logger.debug("User is not GUEST, skipping removal of its project resources") + logger.debug("User is not GUEST, skipping cleanup") return 
- logger.debug( - "Removing project '%s' from the database", project_uuid, - ) + await remove_all_projects_for_user(app=app, user_id=user_id) + await remove_user(app=app, user_id=user_id) - try: - await delete_project_from_db(app, project_uuid, user_id) - except ProjectNotFoundError: - logging.warning("Project '%s' not found, skipping removal", project_uuid) - # when manually changing a user to GUEST, it might happen that it has more then one project +async def remove_all_projects_for_user(app: web.Application, user_id: int) -> None: + """ + Goes through all the projects and will try to remove them but first it will check if + the project is shared with others. + Based on the given access rights it will deltermine the action to take: + - if other users have read access & execute access it will get deleted + - if other users have write access the project's owner will be changed to a new owner: + - if the project is directly shared with a one or more users, one of these + will be picked as the new owner + - if the project is not shared with any user but with groups of users, one + of the users inside the group (which currently exists) will be picked as + the new owner + """ + # recover user's primary_gid try: - await delete_user(app, user_id) - except Exception: # pylint: disable=broad-except + project_owner: Dict = await get_user(app=app, user_id=user_id) + except users_exceptions.UserNotFoundError: logger.warning( - "User '%s' still has some projects, could not be deleted", user_id + "Could not recover user data for user '%s', stopping removal of projects!", + user_id, ) + return + user_primary_gid: str = str(project_owner["primary_gid"]) + # fetch all projects for the user + user_project_uuids = await app[ + APP_PROJECT_DBAPI + ].list_all_projects_by_uuid_for_user(user_id=user_id) + logger.info( + "Project uuids, to clean, for user '%s': '%s'", user_id, user_project_uuids, + ) -async def garbage_collector_task(app: web.Application): - keep_alive = True - - while 
keep_alive: - logger.info("Starting garbage collector...") + for project_uuid in user_project_uuids: + logger.debug( + "Removing or transfering project '%s'", project_uuid, + ) try: - registry = get_registry(app) - interval = get_garbage_collector_interval(app) - while True: - await collect_garbage(registry, app) - await asyncio.sleep(interval) - - except asyncio.CancelledError: - keep_alive = False - logger.info("Garbage collection task was cancelled, it will not restart!") - except Exception: # pylint: disable=broad-except + project: Dict = await get_project_for_user( + app=app, + project_uuid=project_uuid, + user_id=user_id, + include_templates=True, + ) + except web.HTTPNotFound: logger.warning( - "There was an error during garbage collection, restarting...", - exc_info=True, + "Could not recover project data for project_uuid '%s', skipping...", + project_uuid, ) - # will wait 5 seconds before restarting to avoid restart loops - await asyncio.sleep(5) + continue + new_project_owner_gid = await get_new_project_owner_gid( + app=app, + project_uuid=project_uuid, + user_id=user_id, + user_primary_gid=user_primary_gid, + project=project, + ) -async def setup_garbage_collector_task(app: web.Application): - app[APP_GARBAGE_COLLECTOR_KEY] = app.loop.create_task(garbage_collector_task(app)) - yield - task = app[APP_GARBAGE_COLLECTOR_KEY] - task.cancel() - await task + if new_project_owner_gid is None: + # when no new owner is found just remove the project + logger.info( + "The project can be removed as is not shared with write access with other users" + ) + try: + await delete_project_from_db(app, project_uuid, user_id) + except ProjectNotFoundError: + logging.warning( + "Project '%s' not found, skipping removal", project_uuid + ) + continue + # Try to change the project owner and remove access rights from the current owner + await replace_current_owner( + app=app, + project_uuid=project_uuid, + user_primary_gid=user_primary_gid, + 
new_project_owner_gid=new_project_owner_gid, + project=project, + ) + + +async def get_new_project_owner_gid( + app: web.Application, + project_uuid: str, + user_id: int, + user_primary_gid: int, + project: RowProxy, +) -> str: + """Goes through the access rights and tries to find a new suitable owner. + The first viable user is selected as a new owner. + In order to become a new owner the user must have write access right. + """ + + access_rights = project["accessRights"] + other_users_access_rights = set(access_rights.keys()) - {user_primary_gid} + logger.debug( + "Processing other user and groups access rights '%s'", + other_users_access_rights, + ) + + # Selecting a new project owner + # divide permissions between types of groups + standard_groups = {} # groups of users, multiple users can be part of this + primary_groups = {} # each individual user has a unique primary group + for other_gid in other_users_access_rights: + group = await get_group_from_gid(app=app, gid=int(other_gid)) + # only process for users and groups with write access right + if group is None: + continue + if access_rights[other_gid]["write"] is not True: + continue + + if group.type == GroupType.STANDARD: + standard_groups[other_gid] = access_rights[other_gid] + elif group.type == GroupType.PRIMARY: + primary_groups[other_gid] = access_rights[other_gid] + + logger.debug( + "Possible new owner groups: standard='%s', primary='%s'", + standard_groups, + primary_groups, + ) + + new_project_owner_gid = None + # the primary group contains the users which which the project was directly shared + if len(primary_groups) > 0: + # fetch directly from the direct users with which the project is shared with + new_project_owner_gid = list(primary_groups.keys())[0] + # fallback to the groups search if the user does not exist + if len(standard_groups) > 0 and new_project_owner_gid is None: + new_project_owner_gid = await fetch_new_project_owner_from_groups( + app=app, standard_groups=standard_groups, 
user_id=user_id, + ) + + logger.info( + "Will move project '%s' to user with gid '%s', if user exists", + project_uuid, + new_project_owner_gid, + ) + + return new_project_owner_gid + + +async def fetch_new_project_owner_from_groups( + app: web.Application, standard_groups: Dict, user_id: int +) -> int: + """Iterate over all the users in a group and if the users exists in the db + return its gid""" + + # fetch all users in the group and then get their gid to put in here + # go through user_to_groups table and fetch all uid for matching gid + for group_gid in standard_groups.keys(): + # remove the current owner from the bunch + target_group_users = await get_users_for_gid(app=app, gid=group_gid) - {user_id} + logger.error("Found group users '%s'", target_group_users) + + for possible_user_id in target_group_users: + # check if the possible_user is still present in the db + try: + possible_user = await get_user(app=app, user_id=possible_user_id) + return possible_user["primary_gid"] + except users_exceptions.UserNotFoundError: + logger.warning( + "Could not find new owner '%s' will try a new one", + possible_user_id, + ) + return None + + +async def replace_current_owner( + app: web.Application, + project_uuid: str, + user_primary_gid: int, + new_project_owner_gid: str, + project: RowProxy, +) -> None: + try: + new_project_owner_id = await get_user_id_from_gid( + app=app, primary_gid=int(new_project_owner_gid) + ) + except Exception: # pylint: disable=broad-except + logger.exception( + "Could not recover new user id from gid %s", new_project_owner_gid + ) + return + # the result might me none + if new_project_owner_id is None: + logger.warning( + "Could not recover a new user id from gid %s", new_project_owner_gid + ) + return + + # unseting the project owner and saving the project back + project["prj_owner"] = int(new_project_owner_id) + # removing access rights entry + del project["accessRights"][str(user_primary_gid)] + project["accessRights"][ + 
str(new_project_owner_gid) + ] = ProjectAccessRights.OWNER.value + logger.error("Syncing back project %s", project) + # syncing back project data + try: + await app[APP_PROJECT_DBAPI].update_project_without_enforcing_checks( + project_data=project, project_uuid=project_uuid, + ) + except Exception: # pylint: disable=broad-except + logger.exception( + "Could not remove old owner and replaced it with user %s", + new_project_owner_id, + ) + + +async def remove_user(app: web.Application, user_id: int) -> None: + """Tries to remove a user, if the users still exists a warning message will be displayed""" + try: + await delete_user(app, user_id) + except Exception: # pylint: disable=broad-except + logger.warning( + "User '%s' still has some projects, could not be deleted", user_id + ) -def setup(app: web.Application): - app.cleanup_ctx.append(setup_garbage_collector_task) diff --git a/services/web/server/src/simcore_service_webserver/socketio/handlers.py b/services/web/server/src/simcore_service_webserver/socketio/handlers.py index 61bb9268ffe..aa02e25fa67 100644 --- a/services/web/server/src/simcore_service_webserver/socketio/handlers.py +++ b/services/web/server/src/simcore_service_webserver/socketio/handlers.py @@ -93,7 +93,7 @@ async def set_user_in_rooms( ) -> None: user_id = request.get(RQT_USERID_KEY, ANONYMOUS_USER_ID) primary_group, user_groups, all_group = await list_user_groups(app, user_id) - groups = [primary_group] + user_groups + [all_group] + groups = [primary_group] + user_groups + ([all_group] if bool(all_group) else []) sio = get_socket_server(app) # TODO: check if it is necessary to leave_room when socket disconnects for group in groups: diff --git a/services/web/server/src/simcore_service_webserver/users_api.py b/services/web/server/src/simcore_service_webserver/users_api.py index 9d3f95ad0a6..832da0038a2 100644 --- a/services/web/server/src/simcore_service_webserver/users_api.py +++ b/services/web/server/src/simcore_service_webserver/users_api.py @@ 
-239,3 +239,11 @@ async def get_user(app: web.Application, user_id: int) -> Dict: if not row: raise UserNotFoundError(uid=user_id) return dict(row) + + +async def get_user_id_from_gid(app: web.Application, primary_gid: int) -> int: + engine = app[APP_DB_ENGINE_KEY] + async with engine.acquire() as conn: + return await conn.scalar( + sa.select([users.c.id]).where(users.c.primary_gid == primary_gid) + ) diff --git a/services/web/server/src/simcore_service_webserver/users_to_groups_api.py b/services/web/server/src/simcore_service_webserver/users_to_groups_api.py new file mode 100644 index 00000000000..7bf48025a82 --- /dev/null +++ b/services/web/server/src/simcore_service_webserver/users_to_groups_api.py @@ -0,0 +1,18 @@ +from typing import Set +import sqlalchemy as sa +from aiohttp import web + +from servicelib.application_keys import APP_DB_ENGINE_KEY +from .db_models import user_to_groups + + +async def get_users_for_gid(app: web.Application, gid: int) -> Set[int]: + engine = app[APP_DB_ENGINE_KEY] + result = set() + async with engine.acquire() as conn: + query_result = await conn.execute( + sa.select([user_to_groups.c.uid]).where(user_to_groups.c.gid == gid) + ) + async for entry in query_result: + result.add(entry[0]) + return result diff --git a/services/web/server/tests/conftest.py b/services/web/server/tests/conftest.py index 3aa6b5b7f65..33c088c69c0 100644 --- a/services/web/server/tests/conftest.py +++ b/services/web/server/tests/conftest.py @@ -10,12 +10,16 @@ import logging import sys from pathlib import Path +from typing import Dict import pytest import simcore_service_webserver +from integration.utils import get_fake_data_dir, get_fake_project + current_dir = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + log = logging.getLogger(__name__) # mute noisy loggers @@ -40,6 +44,11 @@ def api_specs_dir(osparc_simcore_root_dir: Path) -> Path: @pytest.fixture(scope="session") def fake_data_dir() -> Path: - dirpath = (current_dir / 
"data").resolve() - assert dirpath.exists() - return dirpath + fake_data_dir = get_fake_data_dir() + assert fake_data_dir.exists() + return fake_data_dir + + +@pytest.fixture +def fake_project(fake_data_dir: Path) -> Dict: + return get_fake_project() diff --git a/services/web/server/tests/integration/test_garbage_collection.py b/services/web/server/tests/integration/test_garbage_collection.py new file mode 100644 index 00000000000..415a78e85d2 --- /dev/null +++ b/services/web/server/tests/integration/test_garbage_collection.py @@ -0,0 +1,843 @@ +# pylint:disable=redefined-outer-name,unused-argument,too-many-arguments + + +import asyncio +import logging +from copy import deepcopy +from typing import Dict, List +from uuid import uuid4 + +import aiopg +import aioredis +import pytest +from pytest_simcore.helpers.utils_login import log_client_in +from pytest_simcore.helpers.utils_projects import create_project, empty_project_data +from servicelib.application import create_safe_application +from utils import get_fake_project + +from simcore_service_webserver.db import setup_db +from simcore_service_webserver.db_models import projects, users +from simcore_service_webserver.director import setup_director +from simcore_service_webserver.groups_api import ( + add_user_in_group, + create_user_group, + list_user_groups, +) +from simcore_service_webserver.login import setup_login +from simcore_service_webserver.projects import setup_projects +from simcore_service_webserver.resource_manager import setup_resource_manager +from simcore_service_webserver.resource_manager.registry import get_registry +from simcore_service_webserver.rest import setup_rest +from simcore_service_webserver.security import setup_security +from simcore_service_webserver.security_roles import UserRole +from simcore_service_webserver.session import setup_session +from simcore_service_webserver.socketio import setup_sockets +from simcore_service_webserver.users import setup_users + +log = 
logging.getLogger(__name__) + +core_services = ["postgres", "redis", "storage"] +ops_services = ["minio", "adminer"] + + +API_VERSION = "v0" +GARBAGE_COLLECTOR_INTERVAL = 1 +SERVICE_DELETION_DELAY = 1 +# ensure enough time has passed and GC was triggered +WAIT_FOR_COMPLETE_GC_CYCLE = GARBAGE_COLLECTOR_INTERVAL + SERVICE_DELETION_DELAY + 1 + + +@pytest.fixture +async def db_engine(postgres_dsn: Dict) -> aiopg.sa.Engine: + dsn = "postgresql://{user}:{password}@{host}:{port}/{database}".format( + **postgres_dsn + ) + return await aiopg.sa.create_engine(dsn) + + +@pytest.fixture(autouse=True) +def __drop_and_recreate_postgres__(database_from_template_before_each_function) -> None: + yield + + +@pytest.fixture(autouse=True) +async def __delete_all_redis_keys__(loop, redis_service): + client = await aioredis.create_redis_pool(str(redis_service), encoding="utf-8") + await client.flushall() + client.close() + await client.wait_closed() + + yield + # do nothing on teadown + + +@pytest.fixture +def client( + loop, aiohttp_client, app_config, postgres_with_template_db, mock_orphaned_services +): + cfg = deepcopy(app_config) + + assert cfg["rest"]["version"] == API_VERSION + assert cfg["rest"]["enabled"] + cfg["projects"]["enabled"] = True + cfg["director"]["enabled"] = True + cfg["resource_manager"][ + "garbage_collection_interval_seconds" + ] = GARBAGE_COLLECTOR_INTERVAL # increase speed of garbage collection + cfg["resource_manager"][ + "resource_deletion_timeout_seconds" + ] = SERVICE_DELETION_DELAY # reduce deletion delay + + # fake config + app = create_safe_application(cfg) + + # activates only security+restAPI sub-modules + setup_db(app) + setup_session(app) + setup_security(app) + setup_rest(app) + setup_login(app) + setup_users(app) + setup_sockets(app) + setup_projects(app) + setup_director(app) + assert setup_resource_manager(app) + + yield loop.run_until_complete( + aiohttp_client( + app, + server_kwargs={"port": cfg["main"]["port"], "host": cfg["main"]["host"]}, 
+ ) + ) + + +################ utils + + +async def login_user(client): + """returns a logged in regular user""" + return await log_client_in(client=client, user_data={"role": UserRole.USER.name}) + + +async def login_guest_user(client): + """returns a logged in Guest user""" + return await log_client_in(client=client, user_data={"role": UserRole.GUEST.name}) + + +async def new_project(client, user, access_rights=None): + """returns a project for the given user""" + project_data = empty_project_data() + if access_rights is not None: + project_data["accessRights"] = access_rights + return await create_project(client.app, project_data, user["id"]) + + +async def get_template_project(client, user, access_rights=None): + """returns a tempalte shared with all""" + _, _, all_group = await list_user_groups(client.app, user["id"]) + + # the information comes from a file, randomize it + project_data = get_fake_project() + project_data["name"] = "Fake template" + str(uuid4()) + project_data["uuid"] = str(uuid4()) + project_data["accessRights"] = { + str(all_group["gid"]): {"read": True, "write": False, "delete": False} + } + if access_rights is not None: + project_data["accessRights"].update(access_rights) + + return await create_project(client.app, project_data, user["id"]) + + +async def get_group(client, user): + """Creates a group for a given user""" + return await create_user_group( + app=client.app, + user_id=user["id"], + new_group={"label": uuid4(), "description": uuid4(), "thumbnail": None}, + ) + + +async def invite_user_to_group(client, owner, invitee, group): + """Invite a user to a group on which the owner has writes over""" + await add_user_in_group( + client.app, owner["id"], group["gid"], new_user_id=invitee["id"], + ) + + +async def change_user_role( + db_engine: aiopg.sa.Engine, user: Dict, role: UserRole +) -> None: + async with db_engine.acquire() as conn: + await conn.execute( + users.update().where(users.c.id == int(user["id"])).values(role=role.value) + 
) + + +async def connect_to_socketio(client, user, socketio_client): + """Connect a user to a socket.io""" + socket_registry = get_registry(client.server.app) + cur_client_session_id = str(uuid4()) + sio = await socketio_client(cur_client_session_id, client) + resource_key = { + "user_id": str(user["id"]), + "client_session_id": cur_client_session_id, + } + assert await socket_registry.find_keys(("socket_id", sio.sid)) == [resource_key] + assert sio.sid in await socket_registry.find_resources(resource_key, "socket_id") + assert len(await socket_registry.find_resources(resource_key, "socket_id")) == 1 + sio_connection_data = sio, resource_key + return sio_connection_data + + +async def disconnect_user_from_socketio(client, sio_connection_data): + """disconnect a previously connected socket.io connection""" + sio, resource_key = sio_connection_data + sid = sio.sid + socket_registry = get_registry(client.server.app) + await sio.disconnect() + assert not sio.sid + assert not await socket_registry.find_keys(("socket_id", sio.sid)) + assert not sid in await socket_registry.find_resources(resource_key, "socket_id") + assert not await socket_registry.find_resources(resource_key, "socket_id") + + +async def assert_users_count(db_engine: aiopg.sa.Engine, expected_users: int) -> True: + async with db_engine.acquire() as conn: + users_count = await conn.scalar(users.count()) + assert users_count == expected_users + return True + + +async def assert_projects_count( + db_engine: aiopg.sa.Engine, expected_projects: int +) -> True: + async with db_engine.acquire() as conn: + projects_count = await conn.scalar(projects.count()) + assert projects_count == expected_projects + return True + + +def assert_dicts_match_by_common_keys(first_dict, second_dict) -> True: + common_keys = set(first_dict.keys()) & set(second_dict.keys()) + for key in common_keys: + assert first_dict[key] == second_dict[key], key + + return True + + +async def query_user_from_db(db_engine: aiopg.sa.Engine, user: 
Dict): + """Retruns a user from the db""" + async with db_engine.acquire() as conn: + user_result = await conn.execute( + users.select().where(users.c.id == int(user["id"])) + ) + return await user_result.first() + + +async def query_project_from_db(db_engine: aiopg.sa.Engine, user_project: Dict): + async with db_engine.acquire() as conn: + project_result = await conn.execute( + projects.select().where(projects.c.uuid == user_project["uuid"]) + ) + return await project_result.first() + + +async def assert_user_in_database( + db_engine: aiopg.sa.Engine, logged_user: Dict +) -> True: + user = await query_user_from_db(db_engine, logged_user) + user_as_dict = dict(user) + + # some values need to be transformed + user_as_dict["role"] = user_as_dict["role"].value + user_as_dict["status"] = user_as_dict["status"].value + + assert assert_dicts_match_by_common_keys(user_as_dict, logged_user) is True + + return True + + +async def assert_user_not_in_database(db_engine: aiopg.sa.Engine, user: Dict) -> True: + user = await query_user_from_db(db_engine, user) + assert user is None + + return True + + +async def assert_project_in_database( + db_engine: aiopg.sa.Engine, user_project: Dict +) -> True: + project = await query_project_from_db(db_engine, user_project) + project_as_dict = dict(project) + + assert assert_dicts_match_by_common_keys(project_as_dict, user_project) is True + + return True + + +async def assert_user_is_owner_of_project( + db_engine: aiopg.sa.Engine, owner_user: Dict, owner_project: Dict +) -> True: + user = await query_user_from_db(db_engine, owner_user) + project = await query_project_from_db(db_engine, owner_project) + + assert user.id == project.prj_owner + + return True + + +async def assert_one_owner_for_project( + db_engine: aiopg.sa.Engine, project: Dict, possible_owners: List[Dict] +) -> True: + q_owners = [await query_user_from_db(db_engine, owner) for owner in possible_owners] + q_project = await query_project_from_db(db_engine, project) + + 
assert q_project.prj_owner in set([x.id for x in q_owners]) + + return True + + +################ end utils + + +async def test_t1_while_guest_is_connected_no_resources_are_removed( + client, socketio_client, db_engine, redis_client +): + """while a GUEST user is connected GC will not remove none of its projects nor the user itself""" + logged_guest_user = await login_guest_user(client) + empty_guest_user_project = await new_project(client, logged_guest_user) + + assert await assert_users_count(db_engine, 1) is True + assert await assert_projects_count(db_engine, 1) is True + + await connect_to_socketio(client, logged_guest_user, socketio_client) + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + assert await assert_user_in_database(db_engine, logged_guest_user) is True + assert await assert_project_in_database(db_engine, empty_guest_user_project) is True + + +async def test_t2_cleanup_resources_after_browser_is_closed( + simcore_services, client, socketio_client, db_engine, redis_client +): + """ after a GUEST users with one opened project closes browser tab regularly (GC cleans everything) """ + logged_guest_user = await login_guest_user(client) + empty_guest_user_project = await new_project(client, logged_guest_user) + assert await assert_users_count(db_engine, 1) is True + assert await assert_projects_count(db_engine, 1) is True + + sio_connection_data = await connect_to_socketio( + client, logged_guest_user, socketio_client + ) + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + # check user and project are still in the DB + assert await assert_user_in_database(db_engine, logged_guest_user) is True + assert await assert_project_in_database(db_engine, empty_guest_user_project) is True + + await disconnect_user_from_socketio(client, sio_connection_data) + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + # check user and project are no longer in the DB + async with db_engine.acquire() as conn: + user_result = await conn.execute(users.select()) + user = 
await user_result.first() + project_result = await conn.execute(projects.select()) + project = await project_result.first() + + assert user is None + assert project is None + + +async def test_t3_gc_will_not_intervene_for_regular_users_and_their_resources( + simcore_services, client, socketio_client, db_engine +): + """ after a USER disconnects the GC will remove none of its projects or templates nor the user itself """ + number_of_projects = 5 + number_of_templates = 5 + logged_user = await login_user(client) + user_projects = [ + await new_project(client, logged_user) for _ in range(number_of_projects) + ] + user_template_projects = [ + await get_template_project(client, logged_user) + for _ in range(number_of_templates) + ] + + async def assert_projects_and_users_are_present(): + # check user and projects and templates are still in the DB + assert await assert_user_in_database(db_engine, logged_user) is True + for project in user_projects: + assert await assert_project_in_database(db_engine, project) is True + for template in user_template_projects: + assert await assert_project_in_database(db_engine, template) is True + + assert await assert_users_count(db_engine, 1) is True + expected_count = number_of_projects + number_of_templates + assert await assert_projects_count(db_engine, expected_count) is True + + # connect the user and wait for gc + sio_connection_data = await connect_to_socketio( + client, logged_user, socketio_client + ) + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + await assert_projects_and_users_are_present() + + await disconnect_user_from_socketio(client, sio_connection_data) + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + await assert_projects_and_users_are_present() + + +async def test_t4_project_shared_with_group_transferred_to_user_in_group_on_owner_removal( + simcore_services, client, db_engine +): + """ + USER "u1" creates a GROUP "g1" and invites USERS "u2" and "u3"; + USER "u1" creates a project and shares it with "g1"; + 
USER "u1" is manually marked as "GUEST"; + EXPECTED: one of the users in the "g1" will become the new owner of the project and "u1" will be deleted + """ + u1 = await login_user(client) + u2 = await login_user(client) + u3 = await login_user(client) + + # creating g1 and inviting u2 and u3 + g1 = await get_group(client, u1) + await invite_user_to_group(client, owner=u1, invitee=u2, group=g1) + await invite_user_to_group(client, owner=u1, invitee=u3, group=g1) + + # u1 creates project and shares it with g1 + project = await new_project( + client, + u1, + access_rights={str(g1["gid"]): {"read": True, "write": True, "delete": False}}, + ) + + # mark u1 as guest + await change_user_role(db_engine, u1, UserRole.GUEST) + + assert await assert_users_count(db_engine, 3) is True + assert await assert_projects_count(db_engine, 1) is True + assert await assert_user_is_owner_of_project(db_engine, u1, project) is True + + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + # expected outcome: u1 was deleted, one of the users in g1 is the new owner + assert await assert_user_not_in_database(db_engine, u1) is True + assert await assert_one_owner_for_project(db_engine, project, [u2, u3]) is True + + +async def test_t5_project_shared_with_other_users_transferred_to_one_of_them( + simcore_services, client, db_engine +): + """ + USER "u1" creates a project and shares it with "u2" and "u3"; + USER "u1" is manually marked as "GUEST"; + EXPECTED: one of "u2" or "u3" will become the new owner of the project and "u1" will be deleted + """ + u1 = await login_user(client) + u2 = await login_user(client) + u3 = await login_user(client) + + q_u2 = await query_user_from_db(db_engine, u2) + q_u3 = await query_user_from_db(db_engine, u3) + + # u1 creates project and shares it with g1 + project = await new_project( + client, + u1, + access_rights={ + str(q_u2.primary_gid): {"read": True, "write": True, "delete": False}, + str(q_u3.primary_gid): {"read": True, "write": True, "delete": False}, + }, 
+ ) + + # mark u1 as guest + await change_user_role(db_engine, u1, UserRole.GUEST) + + assert await assert_users_count(db_engine, 3) is True + assert await assert_projects_count(db_engine, 1) is True + assert await assert_user_is_owner_of_project(db_engine, u1, project) is True + + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + # expected outcome: u1 was deleted, one of the users in g1 is the new owner + assert await assert_user_not_in_database(db_engine, u1) is True + assert await assert_one_owner_for_project(db_engine, project, [u2, u3]) is True + + +async def test_t6_project_shared_with_group_transferred_to_last_user_in_group_on_owner_removal( + simcore_services, client, db_engine +): + """ + USER "u1" creates a GROUP "g1" and invites USERS "u2" and "u3"; + USER "u1" creates a project and shares it with "g1"; + USER "u1" is manually marked as "GUEST"; + EXPECTED: one of the users in the "g1" will become the new owner of the project and "u1" will be deleted + the new owner either "u2" or "u3" will be manually marked as "GUEST"; + EXPECTED: the GUEST user will be deleted and the project will pass to the last member of "g1" + """ + u1 = await login_user(client) + u2 = await login_user(client) + u3 = await login_user(client) + + # creating g1 and inviting u2 and u3 + g1 = await get_group(client, u1) + await invite_user_to_group(client, owner=u1, invitee=u2, group=g1) + await invite_user_to_group(client, owner=u1, invitee=u3, group=g1) + + # u1 creates project and shares it with g1 + project = await new_project( + client, + u1, + access_rights={str(g1["gid"]): {"read": True, "write": True, "delete": False}}, + ) + + # mark u1 as guest + await change_user_role(db_engine, u1, UserRole.GUEST) + + assert await assert_users_count(db_engine, 3) is True + assert await assert_projects_count(db_engine, 1) is True + assert await assert_user_is_owner_of_project(db_engine, u1, project) is True + + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + # expected outcome: u1 
was deleted, one of the users in g1 is the new owner + assert await assert_user_not_in_database(db_engine, u1) is True + assert await assert_one_owner_for_project(db_engine, project, [u2, u3]) is True + + # find new owner and mark hims as GUEST + q_u2 = await query_user_from_db(db_engine, u2) + q_u3 = await query_user_from_db(db_engine, u3) + q_project = await query_project_from_db(db_engine, project) + + new_owner = None + remaining_others = [] + for user in [q_u2, q_u3]: + if user.id == q_project.prj_owner: + new_owner = user + else: + remaining_others.append(user) + + assert new_owner is not None # expected to a new owner between the 2 other users + # mark new owner as guest + await change_user_role(db_engine, new_owner, UserRole.GUEST) + + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + # expected outcome: the new_owner will be deleted and one of the remainint_others wil be the new owner + assert await assert_user_not_in_database(db_engine, new_owner) is True + assert ( + await assert_one_owner_for_project(db_engine, project, remaining_others) is True + ) + + +async def test_t7_project_shared_with_group_transferred_from_one_member_to_the_last_and_all_is_removed( + simcore_services, client, db_engine +): + """ + USER "u1" creates a GROUP "g1" and invites USERS "u2" and "u3"; + USER "u1" creates a project and shares it with "g1"; + USER "u1" is manually marked as "GUEST"; + EXPECTED: one of the users in the "g1" will become the new owner of the project and "u1" will be deleted + the new owner either "u2" or "u3" will be manually marked as "GUEST"; + EXPECTED: the GUEST user will be deleted and the project will pass to the last member of "g1" + afterwards the last user will be marked as "GUEST"; + EXPECTED: the last user will be removed and the project will be removed + """ + u1 = await login_user(client) + u2 = await login_user(client) + u3 = await login_user(client) + + # creating g1 and inviting u2 and u3 + g1 = await get_group(client, u1) + await 
invite_user_to_group(client, owner=u1, invitee=u2, group=g1) + await invite_user_to_group(client, owner=u1, invitee=u3, group=g1) + + # u1 creates project and shares it with g1 + project = await new_project( + client, + u1, + access_rights={str(g1["gid"]): {"read": True, "write": True, "delete": False}}, + ) + + # mark u1 as guest + await change_user_role(db_engine, u1, UserRole.GUEST) + + assert await assert_users_count(db_engine, 3) is True + assert await assert_projects_count(db_engine, 1) is True + assert await assert_user_is_owner_of_project(db_engine, u1, project) is True + + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + # expected outcome: u1 was deleted, one of the users in g1 is the new owner + assert await assert_user_not_in_database(db_engine, u1) is True + assert await assert_one_owner_for_project(db_engine, project, [u2, u3]) is True + + # find new owner and mark hims as GUEST + q_u2 = await query_user_from_db(db_engine, u2) + q_u3 = await query_user_from_db(db_engine, u3) + q_project = await query_project_from_db(db_engine, project) + + new_owner = None + remaining_others = [] + for user in [q_u2, q_u3]: + if user.id == q_project.prj_owner: + new_owner = user + else: + remaining_others.append(user) + + assert new_owner is not None # expected to a new owner between the 2 other users + # mark new owner as guest + await change_user_role(db_engine, new_owner, UserRole.GUEST) + + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + # expected outcome: the new_owner will be deleted and one of the remainint_others wil be the new owner + assert await assert_user_not_in_database(db_engine, new_owner) is True + assert ( + await assert_one_owner_for_project(db_engine, project, remaining_others) is True + ) + + # only 1 user is left as the owner mark him as GUEST + for user in remaining_others: + # mark new owner as guest + await change_user_role(db_engine, user, UserRole.GUEST) + + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + # expected outcome: the 
last user will be removed and the project will be removed + assert await assert_users_count(db_engine, 0) is True + assert await assert_projects_count(db_engine, 0) is True + + +async def test_t8_project_shared_with_other_users_transferred_to_one_of_them_until_one_user_remains( + simcore_services, client, db_engine +): + """ + USER "u1" creates a project and shares it with "u2" and "u3"; + USER "u1" is manually marked as "GUEST"; + EXPECTED: one of "u2" or "u3" will become the new owner of the project and "u1" will be deleted + same as T5 => afterwards afterwards the new owner either "u2" or "u3" will be manually marked as "GUEST"; + EXPECTED: the GUEST user will be deleted and the project will pass to the last member of "g1" + """ + u1 = await login_user(client) + u2 = await login_user(client) + u3 = await login_user(client) + + q_u2 = await query_user_from_db(db_engine, u2) + q_u3 = await query_user_from_db(db_engine, u3) + + # u1 creates project and shares it with g1 + project = await new_project( + client, + u1, + access_rights={ + str(q_u2.primary_gid): {"read": True, "write": True, "delete": False}, + str(q_u3.primary_gid): {"read": True, "write": True, "delete": False}, + }, + ) + + # mark u1 as guest + await change_user_role(db_engine, u1, UserRole.GUEST) + + assert await assert_users_count(db_engine, 3) is True + assert await assert_projects_count(db_engine, 1) is True + assert await assert_user_is_owner_of_project(db_engine, u1, project) is True + + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + # expected outcome: u1 was deleted, one of the users in g1 is the new owner + assert await assert_user_not_in_database(db_engine, u1) is True + assert await assert_one_owner_for_project(db_engine, project, [u2, u3]) is True + + # find new owner and mark hims as GUEST + q_u2 = await query_user_from_db(db_engine, u2) + q_u3 = await query_user_from_db(db_engine, u3) + q_project = await query_project_from_db(db_engine, project) + + new_owner = None + 
remaining_others = [] + for user in [q_u2, q_u3]: + if user.id == q_project.prj_owner: + new_owner = user + else: + remaining_others.append(user) + + assert new_owner is not None # expected to a new owner between the 2 other users + # mark new owner as guest + await change_user_role(db_engine, new_owner, UserRole.GUEST) + + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + # expected outcome: the new_owner will be deleted and one of the remainint_others wil be the new owner + assert await assert_user_not_in_database(db_engine, new_owner) is True + assert ( + await assert_one_owner_for_project(db_engine, project, remaining_others) is True + ) + assert await assert_users_count(db_engine, 1) is True + assert await assert_projects_count(db_engine, 1) is True + + +async def test_t9_project_shared_with_other_users_transferred_between_them_and_then_removed( + simcore_services, client, db_engine +): + """ + USER "u1" creates a project and shares it with "u2" and "u3"; + USER "u1" is manually marked as "GUEST"; + EXPECTED: one of "u2" or "u3" will become the new owner of the project and "u1" will be deleted + same as T5 => afterwards afterwards the new owner either "u2" or "u3" will be manually marked as "GUEST"; + EXPECTED: the GUEST user will be deleted and the project will pass to the last member of "g1" + same as T8 => afterwards the last user will be marked as "GUEST"; + EXPECTED: the last user will be removed and the project will be removed + """ + u1 = await login_user(client) + u2 = await login_user(client) + u3 = await login_user(client) + + q_u2 = await query_user_from_db(db_engine, u2) + q_u3 = await query_user_from_db(db_engine, u3) + + # u1 creates project and shares it with g1 + project = await new_project( + client, + u1, + access_rights={ + str(q_u2.primary_gid): {"read": True, "write": True, "delete": False}, + str(q_u3.primary_gid): {"read": True, "write": True, "delete": False}, + }, + ) + + # mark u1 as guest + await change_user_role(db_engine, u1, 
UserRole.GUEST) + + assert await assert_users_count(db_engine, 3) is True + assert await assert_projects_count(db_engine, 1) is True + assert await assert_user_is_owner_of_project(db_engine, u1, project) is True + + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + # expected outcome: u1 was deleted, one of the users in g1 is the new owner + assert await assert_user_not_in_database(db_engine, u1) is True + assert await assert_one_owner_for_project(db_engine, project, [u2, u3]) is True + + # find new owner and mark hims as GUEST + q_u2 = await query_user_from_db(db_engine, u2) + q_u3 = await query_user_from_db(db_engine, u3) + q_project = await query_project_from_db(db_engine, project) + + new_owner = None + remaining_others = [] + for user in [q_u2, q_u3]: + if user.id == q_project.prj_owner: + new_owner = user + else: + remaining_others.append(user) + + assert new_owner is not None # expected to a new owner between the 2 other users + # mark new owner as guest + await change_user_role(db_engine, new_owner, UserRole.GUEST) + + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + # expected outcome: the new_owner will be deleted and one of the remainint_others wil be the new owner + assert await assert_user_not_in_database(db_engine, new_owner) is True + assert ( + await assert_one_owner_for_project(db_engine, project, remaining_others) is True + ) + assert await assert_users_count(db_engine, 1) is True + assert await assert_projects_count(db_engine, 1) is True + + # only 1 user is left as the owner mark him as GUEST + for user in remaining_others: + # mark new owner as guest + await change_user_role(db_engine, user, UserRole.GUEST) + + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + # expected outcome: the last user will be removed and the project will be removed + assert await assert_users_count(db_engine, 0) is True + assert await assert_projects_count(db_engine, 0) is True + + +async def test_t10_owner_and_all_shared_users_marked_as_guests( + 
simcore_services, client, db_engine +): + """ + USER "u1" creates a project and shares it with "u2" and "u3"; + USER "u1", "u2" and "u3" are manually marked as "GUEST"; + EXPECTED: the project and all the users are removed + """ + u1 = await login_user(client) + u2 = await login_user(client) + u3 = await login_user(client) + + q_u2 = await query_user_from_db(db_engine, u2) + q_u3 = await query_user_from_db(db_engine, u3) + + # u1 creates project and shares it with g1 + project = await new_project( + client, + u1, + access_rights={ + str(q_u2.primary_gid): {"read": True, "write": True, "delete": False}, + str(q_u3.primary_gid): {"read": True, "write": True, "delete": False}, + }, + ) + + # mark all users as guest + await change_user_role(db_engine, u1, UserRole.GUEST) + await change_user_role(db_engine, u2, UserRole.GUEST) + await change_user_role(db_engine, u3, UserRole.GUEST) + + assert await assert_users_count(db_engine, 3) is True + assert await assert_projects_count(db_engine, 1) is True + assert await assert_user_is_owner_of_project(db_engine, u1, project) is True + + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + assert await assert_users_count(db_engine, 0) is True + assert await assert_projects_count(db_engine, 0) is True + + +async def test_t11_owner_and_all_users_in_group_marked_as_guests( + simcore_services, client, db_engine +): + """ + USER "u1" creates a group and invites "u2" and "u3"; + USER "u1" creates a project and shares it with the group + USER "u1", "u2" and "u3" are manually marked as "GUEST" + EXPECTED: the project and all the users are removed + """ + u1 = await login_user(client) + u2 = await login_user(client) + u3 = await login_user(client) + + # creating g1 and inviting u2 and u3 + g1 = await get_group(client, u1) + await invite_user_to_group(client, owner=u1, invitee=u2, group=g1) + await invite_user_to_group(client, owner=u1, invitee=u3, group=g1) + + # u1 creates project and shares it with g1 + project = await new_project( + 
client, + u1, + access_rights={str(g1["gid"]): {"read": True, "write": True, "delete": False}}, + ) + + # mark all users as guest + await change_user_role(db_engine, u1, UserRole.GUEST) + await change_user_role(db_engine, u2, UserRole.GUEST) + await change_user_role(db_engine, u3, UserRole.GUEST) + + assert await assert_users_count(db_engine, 3) is True + assert await assert_projects_count(db_engine, 1) is True + assert await assert_user_is_owner_of_project(db_engine, u1, project) is True + + await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE) + + assert await assert_users_count(db_engine, 0) is True + assert await assert_projects_count(db_engine, 0) is True diff --git a/services/web/server/tests/integration/utils.py b/services/web/server/tests/integration/utils.py new file mode 100644 index 00000000000..d6120ffafc1 --- /dev/null +++ b/services/web/server/tests/integration/utils.py @@ -0,0 +1,15 @@ +from pathlib import Path +import sys +from typing import Dict +import json + +current_dir = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + + +def get_fake_data_dir() -> str: + return (current_dir / ".." 
/ "data").resolve() + + +def get_fake_project() -> Dict: + with (get_fake_data_dir() / "fake-project.json").open() as fp: + return json.load(fp) diff --git a/services/web/server/tests/unit/conftest.py b/services/web/server/tests/unit/conftest.py index 2fe61ca1016..927570e9fb1 100644 --- a/services/web/server/tests/unit/conftest.py +++ b/services/web/server/tests/unit/conftest.py @@ -14,12 +14,11 @@ from asyncio import Future from pathlib import Path from typing import Dict -from uuid import uuid4 import pytest from simcore_service_webserver.resources import resources -from simcore_service_webserver.utils import now_str +from pytest_simcore.helpers.utils_projects import empty_project_data ## current directory current_dir = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent @@ -42,12 +41,6 @@ def fake_static_dir(fake_data_dir: Path) -> Dict: return fake_data_dir / "static" -@pytest.fixture -def fake_project(fake_data_dir: Path) -> Dict: - with (fake_data_dir / "fake-project.json").open() as fp: - yield json.load(fp) - - @pytest.fixture def api_version_prefix() -> str: return "v0" @@ -55,20 +48,10 @@ def api_version_prefix() -> str: @pytest.fixture def empty_project(): - def create(): - empty_project = { - "uuid": f"project-{uuid4()}", - "name": "Empty name", - "description": "some description of an empty project", - "prjOwner": "I'm the empty project owner, hi!", - "creationDate": now_str(), - "lastChangeDate": now_str(), - "thumbnail": "", - "workbench": {}, - } - return empty_project - - return create + def factory(): + return empty_project_data() + + return factory @pytest.fixture diff --git a/services/web/server/tests/unit/with_dbs/_helpers.py b/services/web/server/tests/unit/with_dbs/_helpers.py index 5da6a29e1e3..00443e1df7a 100644 --- a/services/web/server/tests/unit/with_dbs/_helpers.py +++ b/services/web/server/tests/unit/with_dbs/_helpers.py @@ -1,4 +1,3 @@ -from asyncio import Future from collections import namedtuple from typing 
import List, Tuple diff --git a/services/web/server/tests/unit/with_dbs/fast/test_garbage_collector.py b/services/web/server/tests/unit/with_dbs/fast/test_garbage_collector.py deleted file mode 100644 index f26abdc1f04..00000000000 --- a/services/web/server/tests/unit/with_dbs/fast/test_garbage_collector.py +++ /dev/null @@ -1,63 +0,0 @@ -# TODO: tests for garbage collector -# - a User with more then 2 projects -# - a user without projects -# - a user with just 1 project -# -# The user can be: -# - connected via browser (websocket connection is up) -# - disconnected (no websocket connection) -import pytest -from copy import deepcopy - -from pytest_simcore.helpers.utils_assert import assert_status -from pytest_simcore.helpers.utils_login import LoggedUser, create_user, log_client_in -from pytest_simcore.helpers.utils_projects import NewProject, delete_all_projects - - -DEFAULT_GARBAGE_COLLECTOR_INTERVAL_SECONDS: int = 3 -DEFAULT_GARBAGE_COLLECTOR_DELETION_TIMEOUT_SECONDS: int = 3 - - - -@pytest.fixture(scope="function") -def app_cfg(default_app_cfg, aiohttp_unused_port): - """ OVERRIDES services/web/server/tests/unit/with_dbs/conftest.py:app_cfg fixture - to create a webserver with customized config - """ - cfg = deepcopy(default_app_cfg) - - # fills ports on the fly - cfg["main"]["port"] = aiohttp_unused_port() - cfg["storage"]["port"] = aiohttp_unused_port() - - cfg["projects"]["enabled"] = True - cfg["director"]["enabled"] = True - cfg["diagnostics"]["enabled"] = False - cfg["tags"]["enabled"] = False - - cfg["resource_manager"]["enabled"] = True - cfg["resource_manager"][ - "garbage_collection_interval_seconds" - ] = DEFAULT_GARBAGE_COLLECTOR_INTERVAL_SECONDS # increase speed of garbage collection - cfg["resource_manager"][ - "resource_deletion_timeout_seconds" - ] = DEFAULT_GARBAGE_COLLECTOR_DELETION_TIMEOUT_SECONDS # reduce deletion delay - - - import logging - log_level = getattr(logging, cfg["main"]["log_level"]) - logging.root.setLevel(log_level) - # this 
fixture can be safely modified during test since it is renovated on every call - return cfg - -from aiohttp import web - - - -async def test_webserver_config(client, api_version_prefix): - resp = await client.get(f"/{api_version_prefix}/config") - - data, error = await assert_status(resp, web.HTTPOk) - - assert data - assert not error diff --git a/services/web/server/tests/unit/with_dbs/medium/test_resource_manager.py b/services/web/server/tests/unit/with_dbs/medium/test_resource_manager.py index f474f8cf245..8ca3d0b3799 100644 --- a/services/web/server/tests/unit/with_dbs/medium/test_resource_manager.py +++ b/services/web/server/tests/unit/with_dbs/medium/test_resource_manager.py @@ -36,9 +36,6 @@ GARBAGE_COLLECTOR_INTERVAL = 1 SERVICE_DELETION_DELAY = 1 -# SEE https://github.com/miguelgrinberg/python-socketio/releases -SIO_VERSION = tuple(int(digit) for digit in socketio.__version__.split(".")) - @pytest.fixture def client(loop, aiohttp_client, app_cfg, postgres_db, mock_orphaned_services): @@ -127,6 +124,7 @@ async def close_project(client, project_uuid: str, client_session_id: str) -> No resp = await client.post(url, json=client_session_id) await assert_status(resp, web.HTTPNoContent) + # ------------------------ TESTS ------------------------------- async def test_anonymous_websocket_connection( client_session_id: str, diff --git a/services/web/server/tests/unit/with_dbs/slow/test_projects.py b/services/web/server/tests/unit/with_dbs/slow/test_projects.py index e1228115652..3093d34a1cb 100644 --- a/services/web/server/tests/unit/with_dbs/slow/test_projects.py +++ b/services/web/server/tests/unit/with_dbs/slow/test_projects.py @@ -1413,6 +1413,7 @@ async def test_open_shared_project_at_same_time( user_role: UserRole, expected: ExpectedResponse, aiohttp_client, + disable_gc_manual_guest_users, ): NUMBER_OF_ADDITIONAL_CLIENTS = 20 # log client 1