Skip to content

Bugfix/apiserver does not need sslheaders #1564

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,8 @@ export WEBSERVER_API_VERSION := $(shell cat $(CURDIR)/services/web/server/VERSI


# swarm stacks
export SWARM_STACK_NAME ?= simcore
export SWARM_STACK_NAME ?= master-simcore
export SWARM_STACK_NAME_NO_HYPHEN = $(subst -,_,$(SWARM_STACK_NAME))

# version tags
export DOCKER_IMAGE_TAG ?= latest
Expand Down
3 changes: 3 additions & 0 deletions ci/github/system-testing/e2e.bash
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,9 @@ IFS=$'\n\t'
DOCKER_IMAGE_TAG=$(exec ci/helpers/build_docker_image_tag.bash)
export DOCKER_IMAGE_TAG

SWARM_STACK_NAME=e2e_test_stack
export SWARM_STACK_NAME

install() {
echo "--------------- installing psql client..."
/bin/bash -c 'sudo apt install -y postgresql-client'
Expand Down
7 changes: 3 additions & 4 deletions services/docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,13 +17,12 @@ services:
# gzip compression
- traefik.http.middlewares.${SWARM_STACK_NAME}_gzip.compress=true
# ssl header necessary so that socket.io upgrades correctly from polling to websocket mode. the middleware must be attached to the right connection.
- traefik.http.middlewares.${SWARM_STACK_NAME}_sslheader.headers.customrequestheaders.X-Forwarded-Proto=http
- traefik.enable=true
- traefik.http.services.${SWARM_STACK_NAME}_api-server.loadbalancer.server.port=8000
- traefik.http.routers.${SWARM_STACK_NAME}_api-server.rule=hostregexp(`{host:.+}`)
- traefik.http.routers.${SWARM_STACK_NAME}_api-server.entrypoints=simcore_api
- traefik.http.routers.${SWARM_STACK_NAME}_api-server.priority=1
- traefik.http.routers.${SWARM_STACK_NAME}_api-server.middlewares=${SWARM_STACK_NAME}_gzip@docker, ${SWARM_STACK_NAME}_sslheader
- traefik.http.routers.${SWARM_STACK_NAME}_api-server.middlewares=${SWARM_STACK_NAME}_gzip@docker
networks:
- default

Expand Down Expand Up @@ -116,13 +115,13 @@ services:
# gzip compression
- traefik.http.middlewares.${SWARM_STACK_NAME}_gzip.compress=true
# ssl header necessary so that socket.io upgrades correctly from polling to websocket mode. the middleware must be attached to the right connection.
- traefik.http.middlewares.${SWARM_STACK_NAME}_sslheader.headers.customrequestheaders.X-Forwarded-Proto=http
- traefik.http.middlewares.${SWARM_STACK_NAME_NO_HYPHEN}_sslheader.headers.customrequestheaders.X-Forwarded-Proto=http
- traefik.enable=true
- traefik.http.services.${SWARM_STACK_NAME}_webserver.loadbalancer.server.port=8080
- traefik.http.routers.${SWARM_STACK_NAME}_webserver.rule=hostregexp(`{host:.+}`)
- traefik.http.routers.${SWARM_STACK_NAME}_webserver.entrypoints=http
- traefik.http.routers.${SWARM_STACK_NAME}_webserver.priority=1
- traefik.http.routers.${SWARM_STACK_NAME}_webserver.middlewares=${SWARM_STACK_NAME}_gzip@docker, ${SWARM_STACK_NAME}_sslheader
- traefik.http.routers.${SWARM_STACK_NAME}_webserver.middlewares=${SWARM_STACK_NAME}_gzip@docker, ${SWARM_STACK_NAME_NO_HYPHEN}_sslheader@docker
networks:
- default
- interactive_services_subnet
Expand Down
2 changes: 1 addition & 1 deletion tests/e2e/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ transfer-images-to-registry: ## transfer images to registry
# completed transfer of images
curl registry:5000/v2/_catalog

PUBLISHED_PORT = $(shell docker inspect simcore_postgres --format "{{(index .Endpoint.Ports 0).PublishedPort}}")
PUBLISHED_PORT = $(shell docker inspect $(shell docker service ls --format "{{ .Name }}" | grep postgres) --format "{{(index .Endpoint.Ports 0).PublishedPort}}")
.PHONY: inject-templates-in-db
inject-templates-in-db: ## inject project templates
@PGPASSWORD=adminadmin psql --host localhost \
Expand Down
84 changes: 53 additions & 31 deletions tests/e2e/utils/wait_for_services.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import logging
from pdb import Pdb
import sys
import time
from pathlib import Path
Expand All @@ -10,20 +11,14 @@

logger = logging.getLogger(__name__)

current_dir = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent
current_dir = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent

WAIT_TIME_SECS = 20
RETRY_COUNT = 7
MAX_WAIT_TIME=240
MAX_WAIT_TIME = 240

# https://docs.docker.com/engine/swarm/how-swarm-mode-works/swarm-task-states/
pre_states = [
"NEW",
"PENDING",
"ASSIGNED",
"PREPARING",
"STARTING"
]
pre_states = ["NEW", "PENDING", "ASSIGNED", "PREPARING", "STARTING"]

failed_states = [
"COMPLETE",
Expand All @@ -32,37 +27,44 @@
"REJECTED",
"ORPHANED",
"REMOVE",
"CREATED"
"CREATED",
]
# UTILS --------------------------------


def get_tasks_summary(tasks):
    """Return a human-readable, one-line-per-task summary of docker task states.

    :param tasks: list of docker swarm task dicts as returned by
        ``docker.models.services.Service.tasks()`` — each must carry
        ``ID`` and ``Status`` (with ``Status["State"]``) entries.
    :return: newline-terminated summary string (empty string for no tasks).

    NOTE: mutates each task dict in-place by defaulting ``Status["Err"]``.
    """
    msg = ""
    for t in tasks:
        # some tasks carry no "Err" key; default it so the format below never raises
        t["Status"].setdefault("Err", "")
        msg += "- task ID:{ID}, STATE: {Status[State]}, ERROR: '{Status[Err]}' \n".format(
            **t
        )
    return msg


def get_failed_tasks_logs(service, docker_client):
    """Collect the container logs of every task of *service* in a failed state.

    :param service: docker swarm service object (``Service.tasks()`` is used).
    :param docker_client: docker client used to resolve containers by id.
    :return: concatenated, BEGIN/END-delimited log dump for each failed task;
        empty string when no task is in a state listed in ``failed_states``.
    """
    failed_logs = ""
    for t in service.tasks():
        if t["Status"]["State"].upper() in failed_states:
            cid = t["Status"]["ContainerStatus"]["ContainerID"]
            failed_logs += "{2} {0} - {1} BEGIN {2}\n".format(
                service.name, t["ID"], "=" * 10
            )
            if cid:
                container = docker_client.containers.get(cid)
                failed_logs += container.logs().decode("utf-8")
            else:
                # the task died before a container was ever created/kept
                failed_logs += " log unavailable. container does not exists\n"
            failed_logs += "{2} {0} - {1} END {2}\n".format(
                service.name, t["ID"], "=" * 10
            )

    return failed_logs


# --------------------------------------------------------------------------------


def osparc_simcore_root_dir() -> Path:
WILDCARD = "services/web/server"

Expand All @@ -81,46 +83,66 @@ def osparc_simcore_root_dir() -> Path:
def core_docker_compose_file() -> Path:
    """Path to the compose file describing the deployed simcore core stack."""
    root_dir = osparc_simcore_root_dir()
    return root_dir / ".stack-simcore-version.yml"


def core_services() -> List[str]:
    """Return the names of the services declared in the core stack compose file."""
    with core_docker_compose_file().open() as fp:
        dc_specs = yaml.safe_load(fp)
    # iterating a dict yields its keys; no need for `.keys()` + comprehension
    return list(dc_specs["services"])


def ops_docker_compose_file() -> Path:
    """Path to the compose file describing the deployed ops stack."""
    root_dir = osparc_simcore_root_dir()
    return root_dir / ".stack-ops.yml"


def ops_services() -> List[str]:
    """Return the names of the services declared in the ops stack compose file."""
    with ops_docker_compose_file().open() as fp:
        dc_specs = yaml.safe_load(fp)
    # iterating a dict yields its keys; no need for `.keys()` + comprehension
    return list(dc_specs["services"])

def wait_for_services() -> None:
    """Block until every expected swarm service reaches the RUNNING state.

    Polls the latest task of each deployed service up to ``RETRY_COUNT``
    times, sleeping ``WAIT_TIME_SECS`` between attempts.

    :raises AssertionError: if some expected services were never deployed,
        or if a service's latest task is not RUNNING after all retries.
    """
    # expected service names come from the core + ops compose specs
    services = core_services() + ops_services()

    client = docker.from_env()
    # swarm prefixes service names with the stack name; the last "_"-separated
    # token is the bare service name (works for stack names containing "_",
    # e.g. "e2e_test_stack_webserver" -> "webserver")
    running_services = [
        x for x in client.services.list() if x.name.split("_")[-1] in services
    ]

    # check all services are in
    assert len(running_services), "no services started!"
    assert len(services) == len(
        running_services
    ), f"Some services are missing:\nexpected: {services}\ngot: {running_services}"

    # now check they are in running mode
    for service in running_services:
        task = None
        for n in range(RETRY_COUNT):
            # only the most recently updated task reflects the current state
            sorted_tasks = sorted(service.tasks(), key=lambda t: t["UpdatedAt"])
            task = sorted_tasks[-1]

            state = task["Status"]["State"].upper()
            if state in pre_states:
                print(
                    "Waiting [{}/{}] for {}...\n{}".format(
                        n, RETRY_COUNT, service.name, get_tasks_summary(service.tasks())
                    )
                )
                time.sleep(WAIT_TIME_SECS)
            elif state in failed_states:
                # a task may fail once and be rescheduled; keep waiting
                print(
                    f"Waiting [{n}/{RETRY_COUNT}] Service {service.name} failed once...\n{get_tasks_summary(service.tasks())}"
                )
                time.sleep(WAIT_TIME_SECS)
            else:
                break

        assert task
        assert (
            task["Status"]["State"].upper() == "RUNNING"
        ), "Expected running, got \n{}\n{}".format(
            pformat(task), get_tasks_summary(service.tasks())
        )
        # get_failed_tasks_logs(service, client))


if __name__ == "__main__":
Expand Down