Skip to content

Commit 67ba861

Browse files
authored
refactor!: move api evaluation context methods to api module (#164)
* refactor: move api evaluation_context methods to api module Signed-off-by: Federico Bond <[email protected]> * refactor: rename api_evaluation_context and set_api_evaluation_context Signed-off-by: Federico Bond <[email protected]> --------- Signed-off-by: Federico Bond <[email protected]>
1 parent ae412f7 commit 67ba861

6 files changed

+53
-56
lines changed

open_feature/open_feature_api.py

+15
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import typing
22

3+
from open_feature.evaluation_context.evaluation_context import EvaluationContext
34
from open_feature.exception.exceptions import GeneralError
45
from open_feature.open_feature_client import OpenFeatureClient
56
from open_feature.provider.metadata import Metadata
@@ -8,6 +9,8 @@
89

910
_provider: AbstractProvider = NoOpProvider()
1011

12+
_evaluation_context = EvaluationContext()
13+
1114

1215
def get_client(
1316
name: typing.Optional[str] = None, version: typing.Optional[str] = None
@@ -30,3 +33,15 @@ def get_provider() -> typing.Optional[AbstractProvider]:
3033
def get_provider_metadata() -> typing.Optional[Metadata]:
3134
global _provider
3235
return _provider.get_metadata()
36+
37+
38+
def get_evaluation_context() -> EvaluationContext:
39+
global _evaluation_context
40+
return _evaluation_context
41+
42+
43+
def set_evaluation_context(evaluation_context: EvaluationContext):
44+
global _evaluation_context
45+
if evaluation_context is None:
46+
raise GeneralError(error_message="No api level evaluation context")
47+
_evaluation_context = evaluation_context

open_feature/open_feature_client.py

+4-2
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
import typing
33
from dataclasses import dataclass
44

5+
from open_feature import open_feature_api as api
56
from open_feature.evaluation_context.evaluation_context import EvaluationContext
67
from open_feature.exception.error_code import ErrorCode
78
from open_feature.exception.exceptions import (
@@ -23,7 +24,6 @@
2324
before_hooks,
2425
error_hooks,
2526
)
26-
from open_feature.open_feature_evaluation_context import api_evaluation_context
2727
from open_feature.provider.no_op_provider import NoOpProvider
2828
from open_feature.provider.provider import AbstractProvider
2929

@@ -280,7 +280,9 @@ def evaluate_flag_details(
280280

281281
# Requirement 3.2.2 merge: API.context->client.context->invocation.context
282282
merged_context = (
283-
api_evaluation_context().merge(self.context).merge(invocation_context)
283+
api.get_evaluation_context()
284+
.merge(self.context)
285+
.merge(invocation_context)
284286
)
285287

286288
flag_evaluation = self._create_provider_evaluation(

open_feature/open_feature_evaluation_context.py

-16
This file was deleted.

readme.md

+7-5
Original file line numberDiff line numberDiff line change
@@ -103,10 +103,12 @@ In OpenFeature, we refer to this as [`targeting`](https://openfeature.dev/specif
103103
If the flag system you're using supports targeting, you can provide the input data using the `EvaluationContext`.
104104

105105
```python
106-
from open_feature.open_feature_api import get_client, get_provider, set_provider
107-
from open_feature.open_feature_evaluation_context import (
108-
api_evaluation_context,
109-
set_api_evaluation_context,
106+
from open_feature.open_feature_api import (
107+
get_client,
108+
get_provider,
109+
set_provider,
110+
get_evaluation_context,
111+
set_evaluation_context,
110112
)
111113

112114
global_context = EvaluationContext(
@@ -117,7 +119,7 @@ request_context = EvaluationContext(
117119
)
118120

119121
## set global context
120-
set_api_evaluation_context(first_context)
122+
set_evaluation_context(global_context)
121123

122124
# merge second context
123125
client = get_client(name="No-op Provider", version="0.5.2")

tests/test_open_feature_api.py

+27
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,15 @@
11
import pytest
22

3+
from open_feature.evaluation_context.evaluation_context import EvaluationContext
34
from open_feature.exception.error_code import ErrorCode
45
from open_feature.exception.exceptions import GeneralError
56
from open_feature.open_feature_api import (
67
get_client,
78
get_provider,
89
set_provider,
910
get_provider_metadata,
11+
get_evaluation_context,
12+
set_evaluation_context,
1013
)
1114
from open_feature.provider.metadata import Metadata
1215
from open_feature.provider.no_op_provider import NoOpProvider
@@ -70,3 +73,27 @@ def test_should_retrieve_metadata_for_configured_provider():
7073
# Then
7174
assert isinstance(metadata, Metadata)
7275
assert metadata.name == "No-op Provider"
76+
77+
78+
def test_should_raise_an_exception_if_no_evaluation_context_set():
79+
# Given
80+
with pytest.raises(GeneralError) as ge:
81+
set_evaluation_context(evaluation_context=None)
82+
# Then
83+
assert ge.value
84+
assert ge.value.error_message == "No api level evaluation context"
85+
assert ge.value.error_code == ErrorCode.GENERAL
86+
87+
88+
def test_should_successfully_set_evaluation_context_for_api():
89+
# Given
90+
evaluation_context = EvaluationContext("targeting_key", {"attr1": "val1"})
91+
92+
# When
93+
set_evaluation_context(evaluation_context)
94+
global_evaluation_context = get_evaluation_context()
95+
96+
# Then
97+
assert global_evaluation_context
98+
assert global_evaluation_context.targeting_key == evaluation_context.targeting_key
99+
assert global_evaluation_context.attributes == evaluation_context.attributes

tests/test_open_feature_evaluation_context.py

-33
This file was deleted.

0 commit comments

Comments
 (0)