diff --git a/redis/cluster.py b/redis/cluster.py index ec752741f3..546472357d 100644 --- a/redis/cluster.py +++ b/redis/cluster.py @@ -387,8 +387,7 @@ def __init__( port=6379, startup_nodes=None, cluster_error_retry_attempts=3, - require_full_coverage=True, - skip_full_coverage_check=False, + require_full_coverage=False, reinitialize_steps=10, read_from_replicas=False, url=None, @@ -404,16 +403,15 @@ def __init__( :port: 'int' Can be used to point to a startup node :require_full_coverage: 'bool' - If set to True, as it is by default, all slots must be covered. - If set to False and not all slots are covered, the instance - creation will succeed only if 'cluster-require-full-coverage' - configuration is set to 'no' in all of the cluster's nodes. - Otherwise, RedisClusterException will be thrown. - :skip_full_coverage_check: 'bool' - If require_full_coverage is set to False, a check of - cluster-require-full-coverage config will be executed against all - nodes. Set skip_full_coverage_check to True to skip this check. - Useful for clusters without the CONFIG command (like ElastiCache) + When set to False (default value): the client will not require a + full coverage of the slots. However, if not all slots are covered, + and at least one node has 'cluster-require-full-coverage' set to + 'yes,' the server will throw a ClusterDownError for some key-based + commands. See - + https://redis.io/topics/cluster-tutorial#redis-cluster-configuration-parameters + When set to True: all slots must be covered to construct the + cluster client. If not all slots are covered, RedisClusterException + will be thrown. :read_from_replicas: 'bool' Enable read from replicas in READONLY mode. You can read possibly stale data. 
@@ -510,7 +508,6 @@ def __init__( startup_nodes=startup_nodes, from_url=from_url, require_full_coverage=require_full_coverage, - skip_full_coverage_check=skip_full_coverage_check, **kwargs, ) @@ -1111,8 +1108,7 @@ def __init__( self, startup_nodes, from_url=False, - require_full_coverage=True, - skip_full_coverage_check=False, + require_full_coverage=False, lock=None, **kwargs, ): @@ -1123,7 +1119,6 @@ def __init__( self.populate_startup_nodes(startup_nodes) self.from_url = from_url self._require_full_coverage = require_full_coverage - self._skip_full_coverage_check = skip_full_coverage_check self._moved_exception = None self.connection_kwargs = kwargs self.read_load_balancer = LoadBalancer() @@ -1249,32 +1244,6 @@ def populate_startup_nodes(self, nodes): for n in nodes: self.startup_nodes[n.name] = n - def cluster_require_full_coverage(self, cluster_nodes): - """ - if exists 'cluster-require-full-coverage no' config on redis servers, - then even all slots are not covered, cluster still will be able to - respond - """ - - def node_require_full_coverage(node): - try: - return ( - "yes" - in node.redis_connection.config_get( - "cluster-require-full-coverage" - ).values() - ) - except ConnectionError: - return False - except Exception as e: - raise RedisClusterException( - 'ERROR sending "config get cluster-require-full-coverage"' - f" command to redis server: {node.name}, {e}" - ) - - # at least one node should have cluster-require-full-coverage yes - return any(node_require_full_coverage(node) for node in cluster_nodes.values()) - def check_slots_coverage(self, slots_cache): # Validate if all slots are covered or if we should try next # startup node @@ -1450,29 +1419,9 @@ def initialize(self): # isn't a full coverage raise RedisClusterException( f"All slots are not covered after query all startup_nodes. " - f"{len(self.slots_cache)} of {REDIS_CLUSTER_HASH_SLOTS} " + f"{len(tmp_slots)} of {REDIS_CLUSTER_HASH_SLOTS} " f"covered..." 
) - elif not fully_covered and not self._require_full_coverage: - # The user set require_full_coverage to False. - # In case of full coverage requirement in the cluster's Redis - # configurations, we will raise an exception. Otherwise, we may - # continue with partial coverage. - # see Redis Cluster configuration parameters in - # https://redis.io/topics/cluster-tutorial - if ( - not self._skip_full_coverage_check - and self.cluster_require_full_coverage(tmp_nodes_cache) - ): - raise RedisClusterException( - "Not all slots are covered but the cluster's " - "configuration requires full coverage. Set " - "cluster-require-full-coverage configuration to no on " - "all of the cluster nodes if you wish the cluster to " - "be able to serve without being fully covered." - f"{len(self.slots_cache)} of {REDIS_CLUSTER_HASH_SLOTS} " - f"covered..." - ) # Set the tmp variables to the real variables self.nodes_cache = tmp_nodes_cache diff --git a/redis/commands/core.py b/redis/commands/core.py index 4f0accd957..8dfe023661 100644 --- a/redis/commands/core.py +++ b/redis/commands/core.py @@ -2,6 +2,7 @@ import hashlib import time import warnings +from typing import List, Optional from redis.exceptions import ConnectionError, DataError, NoScriptError, RedisError @@ -3255,6 +3256,32 @@ def bzpopmin(self, keys, timeout=0): keys.append(timeout) return self.execute_command("BZPOPMIN", *keys) + def zmpop( + self, + num_keys: int, + keys: List[str], + min: Optional[bool] = False, + max: Optional[bool] = False, + count: Optional[int] = 1, + ) -> list: + """ + Pop ``count`` values (default 1) off of the first non-empty sorted set + named in the ``keys`` list. 
+ + For more information check https://redis.io/commands/zmpop + """ + args = [num_keys] + keys + if (min and max) or (not min and not max): + raise DataError + elif min: + args.append("MIN") + else: + args.append("MAX") + if count != 1: + args.extend(["COUNT", count]) + + return self.execute_command("ZMPOP", *args) + def _zrange( self, command, diff --git a/redis/commands/graph/commands.py b/redis/commands/graph/commands.py index 1db8275223..fa0a9da55f 100644 --- a/redis/commands/graph/commands.py +++ b/redis/commands/graph/commands.py @@ -35,7 +35,7 @@ def query(self, q, params=None, timeout=None, read_only=False, profile=False): Args: - q : + q : str The query. params : dict Query parameters. @@ -178,7 +178,7 @@ def config(self, name, value=None, set=False): name : str The name of the configuration value : - The value we want to ser (can be used only when `set` is on) + The value we want to set (can be used only when `set` is on) set : bool Turn on to set a configuration. Default behavior is get. """ diff --git a/redis/commands/parser.py b/redis/commands/parser.py index dadf3c6bf8..4cce800ec3 100644 --- a/redis/commands/parser.py +++ b/redis/commands/parser.py @@ -103,6 +103,7 @@ def _get_pubsub_keys(self, *args): return None args = [str_if_bytes(arg) for arg in args] command = args[0].upper() + keys = None if command == "PUBSUB": # the second argument is a part of the command name, e.g. # ['PUBSUB', 'NUMSUB', 'foo']. 
@@ -117,6 +118,4 @@ def _get_pubsub_keys(self, *args): # format example: # PUBLISH channel message keys = [args[1]] - else: - keys = None return keys diff --git a/redis/commands/search/commands.py b/redis/commands/search/commands.py index d22afeb875..3f768ab320 100644 --- a/redis/commands/search/commands.py +++ b/redis/commands/search/commands.py @@ -44,7 +44,12 @@ NOOFFSETS = "NOOFFSETS" NOFIELDS = "NOFIELDS" +NOHL = "NOHL" +NOFREQS = "NOFREQS" +MAXTEXTFIELDS = "MAXTEXTFIELDS" +TEMPORARY = "TEMPORARY" STOPWORDS = "STOPWORDS" +SKIPINITIALSCAN = "SKIPINITIALSCAN" WITHSCORES = "WITHSCORES" FUZZY = "FUZZY" WITHPAYLOADS = "WITHPAYLOADS" @@ -66,6 +71,11 @@ def create_index( no_field_flags=False, stopwords=None, definition=None, + max_text_fields=False, + temporary=None, + no_highlight=False, + no_term_frequencies=False, + skip_initial_scan=False, ): """ Create the search index. The index must not already exist. @@ -73,9 +83,23 @@ def create_index( ### Parameters: - **fields**: a list of TextField or NumericField objects - - **no_term_offsets**: If true, we will not save term offsets in the index - - **no_field_flags**: If true, we will not save field flags that allow searching in specific fields - - **stopwords**: If not None, we create the index with this custom stopword list. The list can be empty + - **no_term_offsets**: If true, we will not save term offsets in + the index + - **no_field_flags**: If true, we will not save field flags that + allow searching in specific fields + - **stopwords**: If not None, we create the index with this custom + stopword list. The list can be empty + - **max_text_fields**: If true, we will encode indexes as if there + were more than 32 text fields which allows you to add additional + fields (beyond 32). + - **temporary**: Create a lightweight temporary index which will + expire after the specified period of inactivity (in seconds). The + internal idle timer is reset whenever the index is searched or added to. 
+ - **no_highlight**: If true, disabling highlighting support. + Also implied by no_term_offsets. + - **no_term_frequencies**: If true, we avoid saving the term frequencies + in the index. + - **skip_initial_scan**: If true, we do not scan and index. For more information: https://oss.redis.com/redisearch/Commands/#ftcreate """ # noqa @@ -83,10 +107,21 @@ def create_index( args = [CREATE_CMD, self.index_name] if definition is not None: args += definition.args + if max_text_fields: + args.append(MAXTEXTFIELDS) + if temporary is not None and isinstance(temporary, int): + args.append(TEMPORARY) + args.append(temporary) if no_term_offsets: args.append(NOOFFSETS) + if no_highlight: + args.append(NOHL) if no_field_flags: args.append(NOFIELDS) + if no_term_frequencies: + args.append(NOFREQS) + if skip_initial_scan: + args.append(SKIPINITIALSCAN) if stopwords is not None and isinstance(stopwords, (list, tuple, set)): args += [STOPWORDS, len(stopwords)] if len(stopwords) > 0: @@ -129,7 +164,6 @@ def dropindex(self, delete_documents=False): ### Parameters: - **delete_documents**: If `True`, all documents will be deleted. - For more information: https://oss.redis.com/redisearch/Commands/#ftdropindex """ # noqa keep_str = "" if delete_documents else "KEEPDOCS" @@ -217,23 +251,27 @@ def add_document( ### Parameters - **doc_id**: the id of the saved document. - - **nosave**: if set to true, we just index the document, and don't \ - save a copy of it. This means that searches will just return ids. - - **score**: the document ranking, between 0.0 and 1.0. - - **payload**: optional inner-index payload we can save for fast access in scoring functions - - **replace**: if True, and the document already is in the index, \ + - **nosave**: if set to true, we just index the document, and don't + save a copy of it. This means that searches will just + return ids. 
+ - **score**: the document ranking, between 0.0 and 1.0 + - **payload**: optional inner-index payload we can save for fast + access in scoring functions + - **replace**: if True, and the document already is in the index, we perform an update and reindex the document - - **partial**: if True, the fields specified will be added to the \ - existing document. \ - This has the added benefit that any fields specified \ - with `no_index` will not be reindexed again. Implies `replace` + - **partial**: if True, the fields specified will be added to the + existing document. + This has the added benefit that any fields specified + with `no_index` + will not be reindexed again. Implies `replace` - **language**: Specify the language used for document tokenization. - - **no_create**: if True, the document is only updated and reindexed \ - if it already exists. If the document does not exist, an error will be \ - returned. Implies `replace` - - **fields** kwargs dictionary of the document fields to be saved and/or indexed. - - NOTE: Geo points shoule be encoded as strings of "lon,lat" + - **no_create**: if True, the document is only updated and reindexed + if it already exists. + If the document does not exist, an error will be + returned. Implies `replace` + - **fields** kwargs dictionary of the document fields to be saved + and/or indexed. + NOTE: Geo points should be encoded as strings of "lon,lat" For more information: https://oss.redis.com/redisearch/Commands/#ftadd """ # noqa @@ -481,7 +519,7 @@ def spellcheck(self, query, distance=None, include=None, exclude=None): **query**: search query. **distance***: the maximal Levenshtein distance for spelling - suggestions (default: 1, max: 4). + suggestions (default: 1, max: 4). **include**: specifies an inclusion custom dictionary. **exclude**: specifies an exclusion custom dictionary. 
diff --git a/redis/connection.py b/redis/connection.py index 8fdb4bdf8c..4178f67c57 100755 --- a/redis/connection.py +++ b/redis/connection.py @@ -1317,7 +1317,7 @@ def get_connection(self, command_name, *keys, **options): try: if connection.can_read(): raise ConnectionError("Connection has data") - except ConnectionError: + except (ConnectionError, OSError): connection.disconnect() connection.connect() if connection.can_read(): diff --git a/requirements.txt b/requirements.txt index f1e7e7ecdc..b05ff454bf 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ -deprecated -packaging +deprecated>=1.2.3 +packaging>=20.4 diff --git a/setup.py b/setup.py index 7733220838..559e521982 100644 --- a/setup.py +++ b/setup.py @@ -21,12 +21,18 @@ ] ), url="https://github.com/redis/redis-py", + project_urls={ + "Documentation": "https://redis.readthedocs.io/en/latest/", + "Changes": "https://github.com/redis/redis-py/releases", + "Code": "https://github.com/redis/redis-py", + "Issue tracker": "https://github.com/redis/redis-py/issues", + }, author="Redis Inc.", author_email="oss@redis.com", python_requires=">=3.6", install_requires=[ "deprecated>=1.2.3", - "packaging>=21.3", + "packaging>=20.4", 'importlib-metadata >= 1.0; python_version < "3.8"', ], classifiers=[ diff --git a/tests/test_cluster.py b/tests/test_cluster.py index 496ed9818b..90f52d4899 100644 --- a/tests/test_cluster.py +++ b/tests/test_cluster.py @@ -28,6 +28,7 @@ NoPermissionError, RedisClusterException, RedisError, + ResponseError, ) from redis.utils import str_if_bytes from tests.test_pubsub import wait_for_message @@ -628,6 +629,32 @@ def test_get_node_from_key(self, r): assert replica.server_type == REPLICA assert replica in slot_nodes + def test_not_require_full_coverage_cluster_down_error(self, r): + """ + When require_full_coverage is set to False (default client config) and not + all slots are covered, if one of the nodes has 'cluster-require_full_coverage' + config set to 'yes' some 
key-based commands should throw ClusterDownError + """ + node = r.get_node_from_key("foo") + missing_slot = r.keyslot("foo") + assert r.set("foo", "bar") is True + try: + assert all(r.cluster_delslots(missing_slot)) + with pytest.raises(ClusterDownError): + r.exists("foo") + finally: + try: + # Add back the missing slot + assert r.cluster_addslots(node, missing_slot) is True + # Make sure we are not getting ClusterDownError anymore + assert r.exists("foo") == 1 + except ResponseError as e: + if f"Slot {missing_slot} is already busy" in str(e): + # It can happen if the test failed to delete this slot + pass + else: + raise e + @pytest.mark.onlycluster class TestClusterRedisCommands: @@ -1848,40 +1875,20 @@ def test_init_slots_cache_not_all_slots_covered(self): [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]], ] with pytest.raises(RedisClusterException) as ex: - get_mocked_redis_client( - host=default_host, port=default_port, cluster_slots=cluster_slots - ) - assert str(ex.value).startswith( - "All slots are not covered after query all startup_nodes." - ) - - def test_init_slots_cache_not_require_full_coverage_error(self): - """ - When require_full_coverage is set to False and not all slots are - covered, if one of the nodes has 'cluster-require_full_coverage' - config set to 'yes' the cluster initialization should fail - """ - # Missing slot 5460 - cluster_slots = [ - [0, 5459, ["127.0.0.1", 7000], ["127.0.0.1", 7003]], - [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]], - [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]], - ] - - with pytest.raises(RedisClusterException): get_mocked_redis_client( host=default_host, port=default_port, cluster_slots=cluster_slots, - require_full_coverage=False, - coverage_result="yes", + require_full_coverage=True, ) + assert str(ex.value).startswith( + "All slots are not covered after query all startup_nodes." 
+ ) def test_init_slots_cache_not_require_full_coverage_success(self): """ When require_full_coverage is set to False and not all slots are - covered, if all of the nodes has 'cluster-require_full_coverage' - config set to 'no' the cluster initialization should succeed + covered the cluster client initialization should succeed """ # Missing slot 5460 cluster_slots = [ @@ -1895,39 +1902,10 @@ def test_init_slots_cache_not_require_full_coverage_success(self): port=default_port, cluster_slots=cluster_slots, require_full_coverage=False, - coverage_result="no", ) assert 5460 not in rc.nodes_manager.slots_cache - def test_init_slots_cache_not_require_full_coverage_skips_check(self): - """ - Test that when require_full_coverage is set to False and - skip_full_coverage_check is set to true, the cluster initialization - succeed without checking the nodes' Redis configurations - """ - # Missing slot 5460 - cluster_slots = [ - [0, 5459, ["127.0.0.1", 7000], ["127.0.0.1", 7003]], - [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]], - [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]], - ] - - with patch.object( - NodesManager, "cluster_require_full_coverage" - ) as conf_check_mock: - rc = get_mocked_redis_client( - host=default_host, - port=default_port, - cluster_slots=cluster_slots, - require_full_coverage=False, - skip_full_coverage_check=True, - coverage_result="no", - ) - - assert conf_check_mock.called is False - assert 5460 not in rc.nodes_manager.slots_cache - def test_init_slots_cache(self): """ Test that slots cache can in initialized and all slots are covered diff --git a/tests/test_commands.py b/tests/test_commands.py index b28b63ea6e..a11dc6ce22 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -2058,6 +2058,17 @@ def test_bzpopmin(self, r): r.zadd("c", {"c1": 100}) assert r.bzpopmin("c", timeout=1) == (b"c", b"c1", 100) + @pytest.mark.onlynoncluster + # @skip_if_server_version_lt("7.0.0") turn on after redis 7 release + def 
test_zmpop(self, unstable_r): + unstable_r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) + res = [b"a", [[b"a1", b"1"], [b"a2", b"2"]]] + assert unstable_r.zmpop("2", ["b", "a"], min=True, count=2) == res + with pytest.raises(redis.DataError): + unstable_r.zmpop("2", ["b", "a"], count=2) + unstable_r.zadd("b", {"b1": 10, "ab": 9, "b3": 8}) + assert unstable_r.zmpop("2", ["b", "a"], max=True) == [b"b", [[b"b1", b"10"]]] + def test_zrange(self, r): r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert r.zrange("a", 0, 1) == [b"a1", b"a2"] diff --git a/tests/test_search.py b/tests/test_search.py index 7d666cbd96..6c79041cfe 100644 --- a/tests/test_search.py +++ b/tests/test_search.py @@ -1154,6 +1154,72 @@ def test_index_definition(client): createIndex(client.ft(), num_docs=500, definition=definition) +@pytest.mark.redismod +def testExpire(client): + client.ft().create_index((TextField("txt", sortable=True),), temporary=4) + ttl = client.execute_command("ft.debug", "TTL", "idx") + assert ttl > 2 + + while ttl > 2: + ttl = client.execute_command("ft.debug", "TTL", "idx") + time.sleep(0.01) + + # add document - should reset the ttl + client.ft().add_document("doc", txt="foo bar", text="this is a simple test") + ttl = client.execute_command("ft.debug", "TTL", "idx") + assert ttl > 2 + try: + while True: + ttl = client.execute_command("ft.debug", "TTL", "idx") + time.sleep(0.5) + except redis.exceptions.ResponseError: + assert ttl == 0 + + +@pytest.mark.redismod +def testSkipInitialScan(client): + client.hset("doc1", "foo", "bar") + q = Query("@foo:bar") + + client.ft().create_index((TextField("foo"),), skip_initial_scan=True) + assert 0 == client.ft().search(q).total + + +@pytest.mark.redismod +def testSummarizeDisabled_nooffset(client): + client.ft().create_index((TextField("txt"),), no_term_offsets=True) + client.ft().add_document("doc1", txt="foo bar") + with pytest.raises(Exception): + client.ft().search(Query("foo").summarize(fields=["txt"])) + + +@pytest.mark.redismod +def 
testSummarizeDisabled_nohl(client): + client.ft().create_index((TextField("txt"),), no_highlight=True) + client.ft().add_document("doc1", txt="foo bar") + with pytest.raises(Exception): + client.ft().search(Query("foo").summarize(fields=["txt"])) + + +@pytest.mark.redismod +def testMaxTextFields(client): + # Creating the index definition + client.ft().create_index((TextField("f0"),)) + for x in range(1, 32): + client.ft().alter_schema_add((TextField(f"f{x}"),)) + + # Should be too many indexes + with pytest.raises(redis.ResponseError): + client.ft().alter_schema_add((TextField(f"f{x}"),)) + + client.ft().dropindex("idx") + # Creating the index definition + client.ft().create_index((TextField("f0"),), max_text_fields=True) + # Fill the index with fields + for x in range(1, 50): + client.ft().alter_schema_add((TextField(f"f{x}"),)) + + @pytest.mark.redismod @skip_ifmodversion_lt("2.0.0", "search") def test_create_client_definition(client):