-
Notifications
You must be signed in to change notification settings - Fork 11
/
Copy pathauth_proxy.py
609 lines (532 loc) · 25.4 KB
/
auth_proxy.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
import json
import logging
import os
import re
import subprocess
import sys
from functools import cache
from io import BytesIO
from typing import Dict, Optional, Tuple
from urllib.parse import urlparse, urlunparse
import boto3
import requests
from botocore.awsrequest import AWSPreparedRequest, AWSResponse
from botocore.httpchecksum import resolve_checksum_context
from botocore.model import OperationModel
from localstack import config
from localstack import config as localstack_config
from localstack.aws.chain import HandlerChain
from localstack.aws.chain import RequestContext as AwsRequestContext
from localstack.aws.gateway import Gateway
from localstack.aws.protocol.parser import create_parser
from localstack.aws.spec import load_service
from localstack.config import external_service_url
from localstack.constants import AWS_REGION_US_EAST_1, DOCKER_IMAGE_NAME_PRO
from localstack.http import Request
from localstack.http import Response as HttpResponse
from localstack.http.hypercorn import GatewayServer
from localstack.utils.aws.aws_responses import requests_response
from localstack.utils.bootstrap import setup_logging
from localstack.utils.collections import select_attributes
from localstack.utils.container_utils.container_client import PortMappings
from localstack.utils.docker_utils import DOCKER_CLIENT, reserve_available_container_port
from localstack.utils.files import new_tmp_file, save_file
from localstack.utils.functions import run_safe
from localstack.utils.net import get_docker_host_from_container, get_free_tcp_port
from localstack.utils.serving import Server
from localstack.utils.strings import short_uid, to_bytes, to_str, truncate
from localstack_ext.bootstrap.licensingv2 import ENV_LOCALSTACK_API_KEY, ENV_LOCALSTACK_AUTH_TOKEN
from requests import Response
from aws_replicator import config as repl_config
from aws_replicator.client.utils import truncate_content
from aws_replicator.config import HANDLER_PATH_PROXIES
from aws_replicator.shared.models import AddProxyRequest, ProxyConfig
# module-level logger; verbosity is raised to DEBUG when LocalStack runs in debug mode
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
if config.DEBUG:
    LOG.setLevel(logging.DEBUG)

# pip package of this extension, installed into the proxy container on the fly
# TODO make configurable
CLI_PIP_PACKAGE = "localstack-extension-aws-replicator"
# note: enable the line below temporarily for testing:
# CLI_PIP_PACKAGE = "git+https://github.com/localstack/localstack-extensions/@branch#egg=localstack-extension-aws-replicator&subdirectory=aws-replicator"

# name prefix for proxy containers spawned by start_aws_auth_proxy_in_container()
CONTAINER_NAME_PREFIX = "ls-aws-proxy-"
# path inside the proxy container where the serialized ProxyConfig is copied
CONTAINER_CONFIG_FILE = "/tmp/ls.aws.proxy.yml"
# path inside the proxy container where the proxy process' output is redirected
CONTAINER_LOG_FILE = "/tmp/ls-aws-proxy.log"
# default bind host if `bind_host` is not specified for the proxy
DEFAULT_BIND_HOST = "127.0.0.1"
class AwsProxyHandler:
    """
    A handler for an AWS Handler chain that attempts to forward the request using a specific
    boto3 session. This can be used to proxy incoming requests to real AWS.
    """

    def __init__(self, session: boto3.Session = None):
        """:param session: boto3 session holding the upstream AWS credentials; a default
        session (credentials from env/config files) is created if not provided."""
        self.session = session or boto3.Session()

    def __call__(self, chain: HandlerChain, context: AwsRequestContext, response: HttpResponse):
        """Forward the parsed service request in `context` to real AWS and respond with the result."""
        # prepare the API invocation parameters
        LOG.info(
            "Received %s.%s = %s",
            context.service.service_name,
            context.operation.name,
            context.service_request,
        )

        # make the actual API call against upstream AWS (will also calculate a new auth signature)
        try:
            aws_response = self._make_aws_api_call(context)
        except Exception:
            LOG.exception(
                "Exception while proxying %s.%s to AWS",
                context.service.service_name,
                context.operation.name,
            )
            raise

        # tell the handler chain to respond
        LOG.info(
            "AWS Response %s.%s: url=%s status_code=%s, headers=%s, content=%s",
            context.service.service_name,
            context.operation.name,
            aws_response.url,
            aws_response.status_code,
            aws_response.headers,
            aws_response.content,
        )
        chain.respond(aws_response.status_code, aws_response.content, dict(aws_response.headers))

    def _make_aws_api_call(self, context: AwsRequestContext) -> AWSResponse:
        """Re-sign and send the request to upstream AWS, returning the raw HTTP response.

        This is a stripped down version of botocore's client._make_api_call, to immediately
        get the HTTP response instead of a parsed response.
        """
        # TODO: reconcile with AwsRequestProxy from localstack, and other forwarder tools

        # create a real AWS client
        client = self.session.client(context.service.service_name, region_name=context.region)
        operation_model = context.operation

        # prepare API request parameters as expected by boto (drop None values)
        api_params = {k: v for k, v in context.service_request.items() if v is not None}

        request_context = {
            "client_region": client.meta.region_name,
            "client_config": client.meta.config,
            "has_streaming_input": operation_model.has_streaming_input,
            "auth_type": operation_model.auth_type,
        }
        (
            endpoint_url,
            additional_headers,
            properties,
        ) = client._resolve_endpoint_ruleset(operation_model, api_params, request_context)
        if properties:
            # Pass arbitrary endpoint info with the Request
            # for use during construction.
            request_context["endpoint_properties"] = properties
        request_dict = client._convert_to_request_dict(
            api_params=api_params,
            operation_model=operation_model,
            endpoint_url=endpoint_url,
            context=request_context,
            headers=additional_headers,
        )
        resolve_checksum_context(request_dict, operation_model, api_params)

        # temporarily materialize streaming bodies so they can be inspected/adjusted below
        if operation_model.has_streaming_input:
            request_dict["body"] = request_dict["body"].read()
        self._adjust_request_dict(context.service.service_name, request_dict)
        if operation_model.has_streaming_input:
            request_dict["body"] = BytesIO(request_dict["body"])

        LOG.info("Making AWS request %s", request_dict)
        http, _ = client._endpoint.make_request(operation_model, request_dict)
        http: AWSResponse

        # for some elusive reasons, these header modifications are needed (were part of http2_server)
        http.headers.pop("Date", None)
        http.headers.pop("Server", None)
        if operation_model.has_streaming_output:
            http.headers.pop("Content-Length", None)
        return http

    def _adjust_request_dict(self, service_name: str, request_dict: Dict):
        """Apply minor fixes to the request dict, which seem to be required in the current setup."""
        # TODO: replacing localstack-specific URLs, IDs, etc, should ideally be done in a more generalized
        #  way.

        req_body = request_dict.get("body")
        # Fix: compute body_str unconditionally - it was previously assigned only in the
        # "s3" branch, so the "sqs" URL branch below raised a NameError that was silently
        # swallowed by run_safe(), making the queue-URL rewrite a no-op.
        body_str = run_safe(lambda: to_str(req_body)) or ""

        # TODO: fix for switch between path/host addressing
        # Note: the behavior seems to be different across botocore versions. Seems to be working
        #  with 1.29.97 (fix below not required) whereas newer versions like 1.29.151 require the fix.
        if service_name == "s3":
            request_url = request_dict["url"]
            url_parsed = list(urlparse(request_url))
            path_parts = url_parsed[2].strip("/").split("/")
            bucket_subdomain_prefix = f"://{path_parts[0]}.s3."
            if bucket_subdomain_prefix in request_url:
                prefix = f"/{path_parts[0]}"
                url_parsed[2] = url_parsed[2].removeprefix(prefix)
                request_dict["url_path"] = request_dict["url_path"].removeprefix(prefix)
                # replace empty path with "/" (seems required for signature calculation)
                request_dict["url_path"] = request_dict["url_path"] or "/"
                url_parsed[2] = url_parsed[2] or "/"
                # re-construct final URL
                request_dict["url"] = urlunparse(url_parsed)

            # TODO: this custom fix should not be required - investigate and remove!
            if "<CreateBucketConfiguration" in body_str and "LocationConstraint" not in body_str:
                region = request_dict["context"]["client_region"]
                if region == AWS_REGION_US_EAST_1:
                    request_dict["body"] = ""
                else:
                    request_dict["body"] = (
                        '<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
                        f"<LocationConstraint>{region}</LocationConstraint></CreateBucketConfiguration>"
                    )

        if service_name == "sqs" and isinstance(req_body, dict):
            # rewrite LocalStack queue URLs/account IDs to the real AWS account
            account_id = self._query_account_id_from_aws()
            if "QueueUrl" in req_body:
                queue_name = req_body["QueueUrl"].split("/")[-1]
                req_body["QueueUrl"] = f"https://queue.amazonaws.com/{account_id}/{queue_name}"
            if "QueueOwnerAWSAccountId" in req_body:
                req_body["QueueOwnerAWSAccountId"] = account_id

        if service_name == "sqs" and request_dict.get("url"):
            req_json = run_safe(lambda: json.loads(body_str)) or {}
            account_id = self._query_account_id_from_aws()
            queue_name = req_json.get("QueueName")
            if account_id and queue_name:
                request_dict["url"] = f"https://queue.amazonaws.com/{account_id}/{queue_name}"
                req_json["QueueOwnerAWSAccountId"] = account_id
                request_dict["body"] = to_bytes(json.dumps(req_json))

    def _fix_headers(self, request: Request, service_name: str):
        """Strip LocalStack-internal headers and fix the Host header for S3 requests.

        NOTE(review): not invoked anywhere within this class in the visible code -
        kept for interface compatibility (also present on AuthProxyAWS, which uses it).
        """
        if service_name == "s3":
            # fix the Host header, to avoid bucket addressing issues
            host = request.headers.get("Host") or ""
            regex = r"^(https?://)?([0-9.]+|localhost)(:[0-9]+)?"
            if re.match(regex, host):
                request.headers["Host"] = re.sub(regex, r"\1s3.localhost.localstack.cloud", host)
        request.headers.pop("Content-Length", None)
        request.headers.pop("x-localstack-request-url", None)
        request.headers.pop("X-Forwarded-For", None)
        request.headers.pop("X-Localstack-Tgt-Api", None)
        request.headers.pop("X-Moto-Account-Id", None)
        request.headers.pop("Remote-Addr", None)

    @cache
    def _query_account_id_from_aws(self) -> str:
        """Return the AWS account ID of the configured credentials (cached per instance;
        note @cache on a method keeps the instance alive for the process lifetime)."""
        sts_client = self.session.client("sts")
        result = sts_client.get_caller_identity()
        return result["Account"]
class AwsProxyGateway(Gateway):
    """
    A handler chain that receives AWS requests, and proxies them transparently to upstream
    AWS using real credentials. It de-constructs the incoming request, and creates a new
    request signed with the AWS credentials configured in the environment.
    """

    def __init__(self) -> None:
        from localstack.aws import handlers

        # request pipeline: parse the service/operation, then hand off to the proxy handler
        request_pipeline = [
            handlers.parse_service_name,
            handlers.content_decoder,
            handlers.add_region_from_header,
            handlers.add_account_id,
            handlers.parse_service_request,
            AwsProxyHandler(),
        ]
        # error pipeline: log and convert uncaught exceptions into error responses
        error_pipeline = [
            handlers.log_exception,
            handlers.handle_internal_failure,
        ]
        super().__init__(
            request_handlers=request_pipeline,
            exception_handlers=error_pipeline,
            context_class=AwsRequestContext,
        )
class AuthProxyAWS(Server):
    """Local proxy server that forwards incoming AWS API requests to real AWS,
    re-signing them with the credentials configured in the environment."""

    def __init__(self, config: ProxyConfig, port: int = None):
        """:param config: proxy configuration (services, bind host, etc.)
        :param port: port to listen on; a free TCP port is chosen if not given."""
        self.config = config
        port = port or get_free_tcp_port()
        super().__init__(port=port)

    def do_run(self):
        """Register this proxy with the main LocalStack container, then serve (blocking)."""
        self.register_in_instance()
        bind_host = self.config.get("bind_host") or DEFAULT_BIND_HOST
        srv = GatewayServer(AwsProxyGateway(), localstack_config.HostAndPort(bind_host, self.port))
        srv.start()
        srv.join()
        # proxy = run_server(port=self.port, bind_addresses=[bind_host], handler=self.proxy_request)
        # proxy.join()

    def proxy_request(self, request: Request, data: bytes) -> Response:
        """Parse an incoming AWS request, re-sign it, forward it to upstream AWS, and
        return the upstream response (or a 400 response on failure)."""
        parsed = self._extract_region_and_service(request.headers)
        if not parsed:
            return requests_response("", status_code=400)
        region_name, service_name = parsed

        query_string = to_str(request.query_string or "")
        LOG.debug(
            "Proxying request to %s (%s): %s %s %s",
            service_name,
            region_name,
            request.method,
            request.path,
            query_string,
        )

        # re-create the request with the raw body bytes
        request = Request(
            body=data,
            method=request.method,
            headers=request.headers,
            path=request.path,
            query_string=query_string,
        )
        session = boto3.Session()
        client = session.client(service_name, region_name=region_name)

        # fix headers (e.g., "Host") and create client
        self._fix_headers(request, service_name)

        # create request and request dict
        operation_model, aws_request, request_dict = self._parse_aws_request(
            request, service_name, region_name=region_name, client=client
        )

        # adjust request dict and fix certain edge cases in the request
        self._adjust_request_dict(service_name, request_dict)

        headers_truncated = {k: truncate(to_str(v)) for k, v in dict(aws_request.headers).items()}
        LOG.debug(
            "Sending request for service %s to AWS: %s %s - %s - %s",
            service_name,
            request.method,
            aws_request.url,
            truncate_content(request_dict.get("body"), max_length=500),
            headers_truncated,
        )
        try:
            # send request to upstream AWS
            result = client._endpoint.make_request(operation_model, request_dict)

            # create response object - TODO: to be replaced with localstack.http.Response over time
            response = requests_response(
                result[0].content,
                status_code=result[0].status_code,
                headers=dict(result[0].headers),
            )
            LOG.debug(
                "Received response for service %s from AWS: %s - %s",
                service_name,
                response.status_code,
                truncate_content(response.content, max_length=500),
            )
            return response
        except Exception as e:
            if LOG.isEnabledFor(logging.DEBUG):
                LOG.exception("Error when making request to AWS service %s: %s", service_name, e)
            return requests_response("", status_code=400)

    def register_in_instance(self):
        """Register this proxy (port + config) with the extension handler in the main
        LocalStack container. Raises if the proxy is not running or registration fails."""
        port = getattr(self, "port", None)
        if not port:
            raise Exception("Proxy currently not running")
        url = f"{external_service_url()}{HANDLER_PATH_PROXIES}"
        data = AddProxyRequest(port=port, config=self.config)
        LOG.debug("Registering new proxy in main container via: %s", url)
        try:
            response = requests.post(url, json=data)
            assert response.ok
            return response
        except Exception:
            LOG.warning(
                "Unable to register auth proxy - is LocalStack running with the extension enabled?"
            )
            raise

    def _parse_aws_request(
        self, request: Request, service_name: str, region_name: str, client
    ) -> Tuple[OperationModel, AWSPreparedRequest, Dict]:
        """Parse the incoming HTTP request into a botocore operation model, a prepared
        (signed) request, and the request dict used to send it upstream."""
        parser = create_parser(load_service(service_name))
        operation_model, parsed_request = parser.parse(request)
        request_context = {
            "client_region": region_name,
            "has_streaming_input": operation_model.has_streaming_input,
            "auth_type": operation_model.auth_type,
            "client_config": client.meta.config,
        }
        parsed_request = {} if parsed_request is None else parsed_request
        # remove None values (not supported by botocore request serialization)
        parsed_request = {k: v for k, v in parsed_request.items() if v is not None}

        # get endpoint info
        endpoint_info = client._resolve_endpoint_ruleset(
            operation_model, parsed_request, request_context
        )
        # switch for https://github.com/boto/botocore/commit/826b78c54dd87b9da368e9ab6017d8c4823b28c1
        if len(endpoint_info) == 3:
            endpoint_url, additional_headers, properties = endpoint_info
            if properties:
                request_context["endpoint_properties"] = properties
        else:
            endpoint_url, additional_headers = endpoint_info

        # create request dict
        request_dict = client._convert_to_request_dict(
            parsed_request,
            operation_model,
            endpoint_url=endpoint_url,
            context=request_context,
            headers=additional_headers,
        )

        # TODO: fix for switch between path/host addressing
        # Note: the behavior seems to be different across botocore versions. Seems to be working
        #  with 1.29.97 (fix below not required) whereas newer versions like 1.29.151 require the fix.
        if service_name == "s3":
            request_url = request_dict["url"]
            url_parsed = list(urlparse(request_url))
            path_parts = url_parsed[2].strip("/").split("/")
            bucket_subdomain_prefix = f"://{path_parts[0]}.s3."
            if bucket_subdomain_prefix in request_url:
                prefix = f"/{path_parts[0]}"
                url_parsed[2] = url_parsed[2].removeprefix(prefix)
                request_dict["url_path"] = request_dict["url_path"].removeprefix(prefix)
                # replace empty path with "/" (seems required for signature calculation)
                request_dict["url_path"] = request_dict["url_path"] or "/"
                url_parsed[2] = url_parsed[2] or "/"
                # re-construct final URL
                request_dict["url"] = urlunparse(url_parsed)

        aws_request = client._endpoint.create_request(request_dict, operation_model)
        return operation_model, aws_request, request_dict

    def _adjust_request_dict(self, service_name: str, request_dict: Dict):
        """Apply minor fixes to the request dict, which seem to be required in the current setup."""
        # TODO: replacing localstack-specific URLs, IDs, etc, should ideally be done in a more generalized
        #  way.

        req_body = request_dict.get("body")
        # Fix: compute body_str unconditionally - it was previously assigned only in the
        # "s3" branch, so the "sqs" URL branch below raised a NameError that was silently
        # swallowed by run_safe(), making the queue-URL rewrite a no-op.
        body_str = run_safe(lambda: to_str(req_body)) or ""

        if service_name == "s3":
            # TODO: this custom fix should not be required - investigate and remove!
            if "<CreateBucketConfiguration" in body_str and "LocationConstraint" not in body_str:
                region = request_dict["context"]["client_region"]
                if region == AWS_REGION_US_EAST_1:
                    request_dict["body"] = ""
                else:
                    request_dict["body"] = (
                        '<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
                        f"<LocationConstraint>{region}</LocationConstraint></CreateBucketConfiguration>"
                    )

        if service_name == "sqs" and isinstance(req_body, dict):
            # rewrite LocalStack queue URLs/account IDs to the real AWS account
            account_id = self._query_account_id_from_aws()
            if "QueueUrl" in req_body:
                queue_name = req_body["QueueUrl"].split("/")[-1]
                req_body["QueueUrl"] = f"https://queue.amazonaws.com/{account_id}/{queue_name}"
            if "QueueOwnerAWSAccountId" in req_body:
                req_body["QueueOwnerAWSAccountId"] = account_id

        if service_name == "sqs" and request_dict.get("url"):
            req_json = run_safe(lambda: json.loads(body_str)) or {}
            account_id = self._query_account_id_from_aws()
            queue_name = req_json.get("QueueName")
            if account_id and queue_name:
                request_dict["url"] = f"https://queue.amazonaws.com/{account_id}/{queue_name}"
                req_json["QueueOwnerAWSAccountId"] = account_id
                request_dict["body"] = to_bytes(json.dumps(req_json))

    def _fix_headers(self, request: Request, service_name: str):
        """Strip LocalStack-internal headers (and fix the Host header for S3) before re-signing."""
        if service_name == "s3":
            # fix the Host header, to avoid bucket addressing issues
            host = request.headers.get("Host") or ""
            regex = r"^(https?://)?([0-9.]+|localhost)(:[0-9]+)?"
            if re.match(regex, host):
                request.headers["Host"] = re.sub(regex, r"\1s3.localhost.localstack.cloud", host)
        request.headers.pop("Content-Length", None)
        request.headers.pop("x-localstack-request-url", None)
        request.headers.pop("X-Forwarded-For", None)
        request.headers.pop("X-Localstack-Tgt-Api", None)
        request.headers.pop("X-Moto-Account-Id", None)
        request.headers.pop("Remote-Addr", None)

    def _extract_region_and_service(self, headers) -> Optional[Tuple[str, str]]:
        """Extract (region, service) from the SigV4 Authorization header, or None if absent.

        Note: deliberately pops the Authorization header, so the forwarded request gets a
        fresh signature computed from the configured credentials.
        """
        auth_header = headers.pop("Authorization", "")
        parts = auth_header.split("Credential=", maxsplit=1)
        if len(parts) < 2:
            return
        # credential scope format: <access-key>/<date>/<region>/<service>/aws4_request
        parts = parts[1].split("/")
        if len(parts) < 5:
            return
        return parts[2], parts[3]

    @cache
    def _query_account_id_from_aws(self) -> str:
        """Return the AWS account ID of the configured credentials (cached per instance;
        note @cache on a method keeps the instance alive for the process lifetime)."""
        session = boto3.Session()
        sts_client = session.client("sts")
        result = sts_client.get_caller_identity()
        return result["Account"]
def start_aws_auth_proxy(config: ProxyConfig, port: int = None) -> AuthProxyAWS:
    """Create and start an auth proxy server for the given config, returning the server."""
    setup_logging()
    server = AuthProxyAWS(config, port=port)
    server.start()
    return server
def start_aws_auth_proxy_in_container(
    config: ProxyConfig, env_vars: dict = None, port: int = None, quiet: bool = False
):
    """
    Run the auth proxy in a separate local container. This can help in cases where users
    are running into version/dependency issues on their host machines.

    :param config: proxy configuration, serialized to JSON and copied into the container
    :param env_vars: environment for the proxy process (defaults to os.environ, filtered
        down to AWS credentials, DEBUG, and LocalStack auth variables)
    :param port: host/container port for the proxy; a free port is reserved if not given
    :param quiet: if True, exec via the Docker client without piping logs to the terminal
    """

    # TODO: Currently running a container and installing the extension on the fly - we
    #  should consider building pre-baked images for the extension in the future. Also,
    #  the new packaged CLI binary can help us gain more stability over time...

    # reduce noise from Docker/runner utilities
    logging.getLogger("localstack.utils.container_utils.docker_cmd_client").setLevel(logging.INFO)
    logging.getLogger("localstack.utils.docker_utils").setLevel(logging.INFO)
    logging.getLogger("localstack.utils.run").setLevel(logging.INFO)

    print("Proxy container is starting up...")

    # determine port mapping (same port on host and container)
    localstack_config.PORTS_CHECK_DOCKER_IMAGE = DOCKER_IMAGE_NAME_PRO
    port = port or reserve_available_container_port()
    ports = PortMappings()
    ports.add(port, port)

    # create container - the tail command keeps it alive so we can exec into it
    container_name = f"{CONTAINER_NAME_PREFIX}{short_uid()}"
    image_name = DOCKER_IMAGE_NAME_PRO
    DOCKER_CLIENT.create_container(
        image_name,
        name=container_name,
        entrypoint="",
        command=["bash", "-c", f"touch {CONTAINER_LOG_FILE}; tail -f {CONTAINER_LOG_FILE}"],
        ports=ports,
        additional_flags=repl_config.PROXY_DOCKER_FLAGS,
    )

    # start container in detached mode
    DOCKER_CLIENT.start_container(container_name, attach=False)

    # install extension CLI package into the container's virtualenv
    venv_activate = ". .venv/bin/activate"
    command = [
        "bash",
        "-c",
        f"{venv_activate}; pip install --upgrade --no-deps '{CLI_PIP_PACKAGE}'",
    ]
    DOCKER_CLIENT.exec_in_container(container_name, command=command)

    # create config file in container
    config_file_host = new_tmp_file()
    save_file(config_file_host, json.dumps(config))
    DOCKER_CLIENT.copy_into_container(
        container_name, config_file_host, container_path=CONTAINER_CONFIG_FILE
    )

    # prepare environment variables (pass through only credentials/auth/debug settings)
    env_var_names = [
        "DEBUG",
        "AWS_SECRET_ACCESS_KEY",
        "AWS_ACCESS_KEY_ID",
        "AWS_SESSION_TOKEN",
        "AWS_DEFAULT_REGION",
        ENV_LOCALSTACK_API_KEY,
        ENV_LOCALSTACK_AUTH_TOKEN,
    ]
    env_vars = env_vars or os.environ
    env_vars = select_attributes(dict(env_vars), env_var_names)

    # Determine target hostname - we make the host configurable via PROXY_LOCALSTACK_HOST,
    # and if not configured then use get_docker_host_from_container() as a fallback.
    target_host = repl_config.PROXY_LOCALSTACK_HOST
    if not repl_config.PROXY_LOCALSTACK_HOST:
        target_host = get_docker_host_from_container()
    env_vars["LOCALSTACK_HOST"] = target_host

    try:
        print("Proxy container is ready.")
        command = f"{venv_activate}; localstack aws proxy -c {CONTAINER_CONFIG_FILE} -p {port} --host 0.0.0.0 > {CONTAINER_LOG_FILE} 2>&1"
        if quiet:
            DOCKER_CLIENT.exec_in_container(
                container_name, command=["bash", "-c", command], env_vars=env_vars, interactive=True
            )
        else:
            env_vars_list = []
            for key, value in env_vars.items():
                env_vars_list += ["-e", f"{key}={value}"]
            # note: using docker command directly, as our Docker client doesn't fully support log piping yet
            command = [
                "docker",
                "exec",
                "-it",
                *env_vars_list,
                container_name,
                "bash",
                "-c",
                command,
            ]
            # Fix: pass check=True so a failing proxy process raises CalledProcessError and
            # is logged by the handler below - without it, that handler was unreachable.
            subprocess.run(command, stdout=sys.stdout, stderr=sys.stderr, check=True)
    except KeyboardInterrupt:
        # user stopped the proxy - fall through to cleanup
        pass
    except Exception as e:
        LOG.info("Error: %s", e)
        if isinstance(e, subprocess.CalledProcessError):
            LOG.info("Error in called process - output: %s\n%s", e.stdout, e.stderr)
    finally:
        # best-effort removal of the proxy container
        try:
            if repl_config.CLEANUP_PROXY_CONTAINERS:
                DOCKER_CLIENT.remove_container(container_name, force=True)
        except Exception as e:
            if "already in progress" not in str(e):
                raise