diff --git a/.gitignore b/.gitignore
index 0d51c10..e6bad95 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
 # Custom
 .envrc
 .env
+.venv
 tmp/
 
 .DS_Store
diff --git a/bin/tflocal b/bin/tflocal
index 68e89bd..0f67238 100755
--- a/bin/tflocal
+++ b/bin/tflocal
@@ -60,6 +60,7 @@ terraform {
     bucket         = "<bucket>"
     key            = "<key>"
     dynamodb_table = "<dynamodb_table>"
+    use_path_style = "<use_path_style>"
 
     access_key        = "test"
     secret_key        = "test"
@@ -220,6 +221,7 @@ def generate_s3_backend_config() -> str:
         "key": "terraform.tfstate",
         "dynamodb_table": "tf-test-state",
         "region": get_region(),
+        "use_path_style": "false",
         "endpoints": {
             "s3": get_service_endpoint("s3"),
             "iam": get_service_endpoint("iam"),
@@ -242,11 +244,19 @@ def generate_s3_backend_config() -> str:
         backend_config["endpoints"] = {
             k: backend_config["endpoints"].get(k) or v
             for k, v in configs["endpoints"].items()}
+    # If an S3 endpoint is configured in the backend, use it to decide the addressing style;
+    # otherwise fall back to the default endpoint created for the provider. Note that the
+    # user can still override this value with `use_path_style` in the backend tf config.
+    if use_s3_path_style(backend_config.get("endpoints", {}).get("s3")):
+        backend_config["use_path_style"] = "true"
     configs.update(backend_config)
     if not DRY_RUN:
         get_or_create_bucket(configs["bucket"])
         get_or_create_ddb_table(configs["dynamodb_table"], region=configs["region"])
     result = TF_S3_BACKEND_CONFIG
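+    # older Terraform versions expect the legacy `force_path_style` argument instead of `use_path_style`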
+    if is_tf_legacy:
+        result = result.replace("use_path_style", "force_path_style")
+        configs["force_path_style"] = configs.pop("use_path_style")
     for key, value in configs.items():
         if isinstance(value, bool):
             value = str(value).lower()
@@ -289,7 +299,7 @@ def check_override_file(providers_file: str) -> None:
 # AWS CLIENT UTILS
 # ---
 
-def use_s3_path_style() -> bool:
+def use_s3_path_style(endpoint: str = "") -> bool:
     """
     Whether to use S3 path addressing (depending on the configured S3 endpoint)
     If the endpoint starts with the `s3.` prefix, LocalStack will recognize virtual host addressing. If the endpoint
@@ -297,7 +307,9 @@ def use_s3_path_style() -> bool:
     inter container communications in Docker.
     """
     try:
-        host = urlparse(get_service_endpoint("s3")).hostname
+        if endpoint:
+            endpoint = add_http_protocol(endpoint)
+        host = urlparse(endpoint or get_service_endpoint("s3")).hostname
     except ValueError:
         host = ""
 
@@ -343,15 +355,20 @@ def deactivate_access_key(access_key: str) -> str:
     return "L" + access_key[1:] if access_key[0] == "A" else access_key
 
 
+def add_http_protocol(url: str) -> str:
+    """Add the http:// protocal if not found in the url"""
+    if "://" not in url:
+        return f"http://{url}"
+    return url
+
+
 def get_service_endpoint(service: str) -> str:
     """Get the service endpoint URL for the given service name"""
     # allow configuring a custom endpoint via the environment
     env_name = f"{service.replace('-', '_').upper().strip()}_ENDPOINT"
     env_endpoint = os.environ.get(env_name, "").strip()
     if env_endpoint:
-        if "://" not in env_endpoint:
-            env_endpoint = f"http://{env_endpoint}"
-        return env_endpoint
+        return add_http_protocol(env_endpoint)
 
     # some services need specific hostnames
     hostname = LOCALSTACK_HOSTNAME
diff --git a/tests/test_apply.py b/tests/test_apply.py
index 69a471a..f0c7ef2 100644
--- a/tests/test_apply.py
+++ b/tests/test_apply.py
@@ -113,23 +113,39 @@ def test_s3_path_addressing():
 
 
 def test_use_s3_path_style(monkeypatch):
-    monkeypatch.setenv("S3_HOSTNAME", "s3.localhost.localstack.cloud")
+    s3_hostname = "s3.localhost.localstack.cloud"
+    monkeypatch.setenv("S3_HOSTNAME", s3_hostname)
     import_cli_code()
     assert not use_s3_path_style()  # noqa
+    assert not use_s3_path_style(s3_hostname)  # noqa
 
-    monkeypatch.setenv("S3_HOSTNAME", "localhost")
+    s3_hostname = "localhost"
+    monkeypatch.setenv("S3_HOSTNAME", s3_hostname)
     import_cli_code()
     assert use_s3_path_style()  # noqa
+    assert use_s3_path_style(s3_hostname)  # noqa
 
     # test the case where the S3_HOSTNAME could be a Docker container name
-    monkeypatch.setenv("S3_HOSTNAME", "localstack")
+    s3_hostname = "localstack"
+    monkeypatch.setenv("S3_HOSTNAME", s3_hostname)
     import_cli_code()
     assert use_s3_path_style()  # noqa
+    assert use_s3_path_style(s3_hostname)  # noqa
 
     # test the case where the S3_HOSTNAME could be an arbitrary host starting with `s3.`
-    monkeypatch.setenv("S3_HOSTNAME", "s3.internal.host")
+    s3_hostname = "s3.internal.host"
+    monkeypatch.setenv("S3_HOSTNAME", s3_hostname)
     import_cli_code()
     assert not use_s3_path_style()  # noqa
+    assert not use_s3_path_style(s3_hostname)  # noqa
+
+    # test the case where a provided host name differs from the S3_HOSTNAME env default
+    s3_hostname = "s3.internal.host"
+    backend_host_name = "localstack"
+    monkeypatch.setenv("S3_HOSTNAME", s3_hostname)
+    import_cli_code()
+    assert not use_s3_path_style()  # noqa
+    assert use_s3_path_style(backend_host_name)  # noqa
 
 
 def test_provider_aliases():
@@ -199,6 +215,74 @@ def test_s3_backend():
     assert result["ResponseMetadata"]["HTTPStatusCode"] == 200
 
 
+@pytest.mark.parametrize("s3_hostname", ["", "localstack", "s3.amazonaws.com"])
+def test_s3_backend_use_path_style_with_hostname(monkeypatch, s3_hostname):
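+    # DRY_RUN prevents tflocal from creating the real state bucket and DynamoDB lock table;
+    # the test only inspects the generated override file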
+    monkeypatch.setenv("DRY_RUN", "1")
+    monkeypatch.setenv("S3_HOSTNAME", s3_hostname)
+
+    state_bucket = f"tf-state-{short_uid()}"
+    config = """
+    terraform {
+      backend "s3" {
+        bucket = "%s"
+        key    = "terraform.tfstate"
+        region = "us-east-2"
+        skip_credentials_validation = true
+      }
+    }
+    """ % state_bucket
+    temp_dir = deploy_tf_script(config, cleanup=False, user_input="yes")
+
+    # Assert that the override file sets the path style appropriately
+    override_file = os.path.join(temp_dir, "localstack_providers_override.tf")
+
+    assert check_override_file_exists(override_file)
+    with open(override_file, "r") as fp:
+        result = hcl2.load(fp)
+        result = result["terraform"][0]["backend"][0]["s3"]
+
+    # if the hostname isn't provided or starts with `s3.`, we shouldn't use path-style addressing
+    use_path_style = s3_hostname != "" and not s3_hostname.startswith("s3.")
+    path_style_key = "force_path_style" if is_legacy_tf_version(get_version()) else "use_path_style"
+    assert result[path_style_key] == json.dumps(use_path_style)
+
+    rmtree(temp_dir)
+
+
+@pytest.mark.parametrize("s3_endpoint", ["http://localhost:4566", "https://s3.amazonaws.com"])
+def test_s3_backend_use_path_style_with_endpoint(monkeypatch, s3_endpoint):
+    monkeypatch.setenv("DRY_RUN", "1")
+
+    state_bucket = f"tf-state-{short_uid()}"
+    config = """
+    terraform {
+      backend "s3" {
+        bucket = "%s"
+        key    = "terraform.tfstate"
+        region = "us-east-2"
+        skip_credentials_validation = true
+        endpoint = "%s"
+      }
+    }
+    """ % (state_bucket, s3_endpoint)
+    temp_dir = deploy_tf_script(config, cleanup=False, user_input="yes")
+
+    # Assert that the override file sets the path style appropriately
+    override_file = os.path.join(temp_dir, "localstack_providers_override.tf")
+
+    assert check_override_file_exists(override_file)
+    with open(override_file, "r") as fp:
+        result = hcl2.load(fp)
+        result = result["terraform"][0]["backend"][0]["s3"]
+
+    # if the endpoint points to an `s3.` host, we shouldn't use path-style addressing
+    use_path_style = "s3." not in s3_endpoint
+    path_style_key = "force_path_style" if is_legacy_tf_version(get_version()) else "use_path_style"
+    assert result[path_style_key] == json.dumps(use_path_style)
+
+    rmtree(temp_dir)
+
+
 def test_dry_run(monkeypatch):
     monkeypatch.setenv("DRY_RUN", "1")
     state_bucket = "tf-state-dry-run"