From fbc6651f5d8889c965d318e2ae25fb2918be8cdf Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 3 May 2024 16:05:59 -0600 Subject: [PATCH 1/5] minor: bump clippy to 1.78.0 --- .evergreen/check-clippy.sh | 4 ++-- src/client/auth/oidc.rs | 2 +- src/client/options/parse.rs | 2 +- src/lib.rs | 2 +- src/sdam/description/topology.rs | 2 +- src/sdam/srv_polling/test.rs | 4 ++-- src/test.rs | 2 +- src/test/spec/initial_dns_seedlist_discovery.rs | 4 +++- src/test/spec/retryable_reads.rs | 5 ++--- src/test/spec/retryable_writes.rs | 7 +++---- src/test/spec/trace.rs | 10 +++++----- src/test/spec/unified_runner/operation.rs | 4 +++- src/test/spec/unified_runner/test_file.rs | 2 +- 13 files changed, 26 insertions(+), 24 deletions(-) diff --git a/.evergreen/check-clippy.sh b/.evergreen/check-clippy.sh index 7d507af3d..3fb3911c2 100755 --- a/.evergreen/check-clippy.sh +++ b/.evergreen/check-clippy.sh @@ -5,7 +5,7 @@ set -o errexit source ./.evergreen/env.sh # Pin clippy to the latest version. This should be updated when new versions of Rust are released. -CLIPPY_VERSION=1.75.0 +CLIPPY_VERSION=1.78.0 rustup install $CLIPPY_VERSION @@ -13,4 +13,4 @@ rustup install $CLIPPY_VERSION cargo +$CLIPPY_VERSION clippy --all-targets -p mongodb -- -D warnings # Check with all features. -cargo +$CLIPPY_VERSION clippy --all-targets --all-features -p mongodb -- -D warnings \ No newline at end of file +cargo +$CLIPPY_VERSION clippy --all-targets --all-features -p mongodb -- -D warnings diff --git a/src/client/auth/oidc.rs b/src/client/auth/oidc.rs index 67ca01dca..f8036128c 100644 --- a/src/client/auth/oidc.rs +++ b/src/client/auth/oidc.rs @@ -279,7 +279,7 @@ impl Cache { self.idp_server_info = idp_server_info; } self.access_token = Some(response.access_token.clone()); - self.refresh_token = response.refresh_token.clone(); + self.refresh_token.clone_from(&response.refresh_token); self.last_call_time = Instant::now(); self.token_gen_id += 1; } diff --git a/src/client/options/parse.rs b/src/client/options/parse.rs index c8542068d..bf7b22bd9 100644 --- a/src/client/options/parse.rs +++ b/src/client/options/parse.rs @@ -21,7 +21,7 @@ impl Action for ParseConnectionString { let mut options = ClientOptions::from_connection_string(conn_str); #[cfg(feature = "dns-resolver")] { - options.resolver_config = self.resolver_config.clone(); + options.resolver_config.clone_from(&self.resolver_config); } let resolved = host_info.resolve(self.resolver_config).await?; diff --git a/src/lib.rs b/src/lib.rs index 9cf9e7e96..af563acbd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -4,7 +4,7 @@ #![warn(clippy::cast_possible_truncation)] #![warn(clippy::cast_possible_wrap)] #![cfg_attr( - feature = "cargo-clippy", + feature = "clippy", allow( clippy::unreadable_literal, clippy::cognitive_complexity, diff --git a/src/sdam/description/topology.rs b/src/sdam/description/topology.rs index 2b4536ecd..6d9dd89d1 100644 --- a/src/sdam/description/topology.rs +++ b/src/sdam/description/topology.rs @@ -175,7 +175,7 @@ impl TopologyDescription { } self.single_seed = self.servers.len() == 1; - self.set_name = options.repl_set_name.clone(); + self.set_name.clone_from(&options.repl_set_name); self.local_threshold = options.local_threshold; self.heartbeat_freq = options.heartbeat_freq; self.srv_max_hosts = options.srv_max_hosts; diff --git a/src/sdam/srv_polling/test.rs b/src/sdam/srv_polling/test.rs index 547dc299b..6b0310d7e 100644 --- a/src/sdam/srv_polling/test.rs +++ b/src/sdam/srv_polling/test.rs @@ -43,7 +43,7 @@ async fn run_test_extra( new_hosts: 
Result>, ) -> HashSet { let mut options = ClientOptions::new_srv(); - options.hosts = DEFAULT_HOSTS.clone(); + options.hosts.clone_from(&DEFAULT_HOSTS); options.test_options_mut().disable_monitoring_threads = true; options.srv_max_hosts = max_hosts; let mut topology = Topology::new(options.clone()).unwrap(); @@ -132,7 +132,7 @@ async fn load_balanced_no_srv_polling() { let hosts = vec![localhost_test_build_10gen(27017)]; let mut options = ClientOptions::new_srv(); let rescan_interval = options.original_srv_info.as_ref().cloned().unwrap().min_ttl; - options.hosts = hosts.clone(); + options.hosts.clone_from(&hosts); options.load_balanced = Some(true); options.test_options_mut().mock_lookup_hosts = Some(make_lookup_hosts(vec![ localhost_test_build_10gen(27017), diff --git a/src/test.rs b/src/test.rs index e7dd6f47f..a0dea5362 100644 --- a/src/test.rs +++ b/src/test.rs @@ -114,7 +114,7 @@ pub(crate) static DEFAULT_GLOBAL_TRACING_HANDLER: Lazy = Lazy::n pub(crate) fn update_options_for_testing(options: &mut ClientOptions) { if options.server_api.is_none() { - options.server_api = SERVER_API.clone(); + options.server_api.clone_from(&SERVER_API); } #[cfg(any( diff --git a/src/test/spec/initial_dns_seedlist_discovery.rs b/src/test/spec/initial_dns_seedlist_discovery.rs index 944ebb918..32b619c5e 100644 --- a/src/test/spec/initial_dns_seedlist_discovery.rs +++ b/src/test/spec/initial_dns_seedlist_discovery.rs @@ -135,7 +135,9 @@ async fn run_test(mut test_file: TestFile) { } else { let mut options_with_tls = options.clone(); if requires_tls { - options_with_tls.tls = get_client_options().await.tls.clone(); + options_with_tls + .tls + .clone_from(&get_client_options().await.tls); } let client = Client::with_options(options_with_tls).unwrap(); diff --git a/src/test/spec/retryable_reads.rs b/src/test/spec/retryable_reads.rs index edf4972d6..e5f1a5422 100644 --- a/src/test/spec/retryable_reads.rs +++ b/src/test/spec/retryable_reads.rs @@ -138,9 +138,8 @@ async fn retry_read_pool_cleared() { .expect("pool clear should occur"); let next_cmap_events = subscriber - .collect_events(Duration::from_millis(1000), |event| match event { - Event::Cmap(_) => true, - _ => false, + .collect_events(Duration::from_millis(1000), |event| { + matches!(event, Event::Cmap(_)) }) .await; diff --git a/src/test/spec/retryable_writes.rs b/src/test/spec/retryable_writes.rs index 6eb616f48..f71214fe6 100644 --- a/src/test/spec/retryable_writes.rs +++ b/src/test/spec/retryable_writes.rs @@ -49,7 +49,7 @@ async fn run_legacy() { continue; } let mut options = test_case.client_options.unwrap_or_default(); - options.hosts = get_client_options().await.hosts.clone(); + options.hosts.clone_from(&get_client_options().await.hosts); if options.heartbeat_freq.is_none() { options.heartbeat_freq = Some(MIN_HEARTBEAT_FREQUENCY); } @@ -455,9 +455,8 @@ async fn retry_write_pool_cleared() { .expect("pool clear should occur"); let next_cmap_events = subscriber - .collect_events(Duration::from_millis(1000), |event| match event { - Event::Cmap(_) => true, - _ => false, + .collect_events(Duration::from_millis(1000), |event| { + matches!(event, Event::Cmap(_)) }) .await; diff --git a/src/test/spec/trace.rs b/src/test/spec/trace.rs index 1a51cfd6e..2829967be 100644 --- a/src/test/spec/trace.rs +++ b/src/test/spec/trace.rs @@ -49,27 +49,27 @@ fn tracing_truncation() { assert_eq!(s, String::from("...")); // we should "round up" to the end of the first emoji - s = two_emoji.clone(); + s.clone_from(&two_emoji); truncate_on_char_boundary(&mut s, 1); 
assert_eq!(s, String::from("🤔...")); // 4 is a boundary, so we should truncate there - s = two_emoji.clone(); + s.clone_from(&two_emoji); truncate_on_char_boundary(&mut s, 4); assert_eq!(s, String::from("🤔...")); // we should round up to the full string - s = two_emoji.clone(); + s.clone_from(&two_emoji); truncate_on_char_boundary(&mut s, 5); assert_eq!(s, two_emoji); // end of string is a boundary, so we should truncate there - s = two_emoji.clone(); + s.clone_from(&two_emoji); truncate_on_char_boundary(&mut s, 8); assert_eq!(s, two_emoji); // we should get the full string back if the new length is longer than the original - s = two_emoji.clone(); + s.clone_from(&two_emoji); truncate_on_char_boundary(&mut s, 10); assert_eq!(s, two_emoji); } diff --git a/src/test/spec/unified_runner/operation.rs b/src/test/spec/unified_runner/operation.rs index 058f135f2..9be1a8b7e 100644 --- a/src/test/spec/unified_runner/operation.rs +++ b/src/test/spec/unified_runner/operation.rs @@ -124,6 +124,7 @@ pub(crate) trait TestOperation: Debug + Send + Sync { /// If this operation specifies entities to create, returns those entities. Otherwise, /// returns None. + #[cfg(feature = "tracing-unstable")] fn test_file_entities(&self) -> Option<&Vec> { None } @@ -2307,7 +2308,7 @@ impl TestOperation for RenameCollection { let target = test_runner.get_collection(id).await; let ns = target.namespace(); let mut to_ns = ns.clone(); - to_ns.coll = self.to.clone(); + to_ns.coll.clone_from(&self.to); let cmd = doc! { "renameCollection": crate::bson::to_bson(&ns)?, "to": crate::bson::to_bson(&to_ns)?, @@ -2741,6 +2742,7 @@ impl TestOperation for CreateEntities { .boxed() } + #[cfg(feature = "tracing-unstable")] fn test_file_entities(&self) -> Option<&Vec> { Some(&self.entities) } diff --git a/src/test/spec/unified_runner/test_file.rs b/src/test/spec/unified_runner/test_file.rs index 7292b55f8..6bb2b68b7 100644 --- a/src/test/spec/unified_runner/test_file.rs +++ b/src/test/spec/unified_runner/test_file.rs @@ -500,7 +500,7 @@ impl ExpectError { let description = description.as_ref(); if let Some(is_client_error) = self.is_client_error { - if is_client_error != !error.is_server_error() { + if is_client_error == error.is_server_error() { return Err(format!( "{}: expected client error but got {:?}", description, error From e06c99ae00bc76cae58899e143b2313785b6846a Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 10 May 2024 17:09:29 -0600 Subject: [PATCH 2/5] RUST-1945 Add a with_type method to the Aggregate action --- src/action/aggregate.rs | 72 +++++++++++++++++++++++++++++++++-------- src/test/coll.rs | 46 ++++++++++++++++++++++++++ src/test/db.rs | 36 +++++++++++++++++++++ 3 files changed, 140 insertions(+), 14 deletions(-) diff --git a/src/action/aggregate.rs b/src/action/aggregate.rs index 7872ace6f..785f8619b 100644 --- a/src/action/aggregate.rs +++ b/src/action/aggregate.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::{marker::PhantomData, time::Duration}; use bson::Document; @@ -24,8 +24,9 @@ impl Database { /// See the documentation [here](https://www.mongodb.com/docs/manual/aggregation/) for more /// information on aggregations. /// - /// `await` will return d[`Result>`] or d[`Result>`] if - /// a `ClientSession` is provided. + /// `await` will return d[`Result>`]. If a [`ClientSession`] was provided, the + /// returned cursor will be a [`SessionCursor`]. If [`with_type`](Aggregate::with_type) was + /// called, the returned cursor will be generic over the `T` specified. 
     #[deeplink]
     pub fn aggregate(&self, pipeline: impl IntoIterator<Item = Document>) -> Aggregate {
         Aggregate {
             target: AggregateTargetRef::Database(self),
             pipeline: pipeline.into_iter().collect(),
             options: None,
             session: ImplicitSession,
+            _phantom: PhantomData,
         }
     }
 }
@@ -46,8 +48,9 @@ where
     /// See the documentation [here](https://www.mongodb.com/docs/manual/aggregation/) for more
     /// information on aggregations.
     ///
-    /// `await` will return d[`Result<Cursor<Document>>`] or d[`Result<SessionCursor<Document>>`] if
-    /// a [`ClientSession`] is provided.
+    /// `await` will return d[`Result<Cursor<Document>>`]. If a [`ClientSession`] was provided, the
+    /// returned cursor will be a [`SessionCursor`]. If [`with_type`](Aggregate::with_type) was
+    /// called, the returned cursor will be generic over the `T` specified.
     #[deeplink]
     pub fn aggregate(&self, pipeline: impl IntoIterator<Item = Document>) -> Aggregate {
         Aggregate {
             target: AggregateTargetRef::Collection(self.clone_unconcerned()),
             pipeline: pipeline.into_iter().collect(),
             options: None,
             session: ImplicitSession,
+            _phantom: PhantomData,
         }
     }
 }
@@ -66,8 +70,10 @@ impl crate::sync::Database {
     /// See the documentation [here](https://www.mongodb.com/docs/manual/aggregation/) for more
     /// information on aggregations.
     ///
-    /// [`run`](Aggregate::run) will return d[`Result<crate::sync::Cursor<Document>>`] or
-    /// d[`Result<crate::sync::SessionCursor<Document>>`] if a [`ClientSession`] is provided.
+    /// [`run`](Aggregate::run) will return d[`Result<crate::sync::Cursor<Document>>`]. If a
+    /// [`crate::sync::ClientSession`] was provided, the returned cursor will be a
+    /// [`crate::sync::SessionCursor`]. If [`with_type`](Aggregate::with_type) was called, the
+    /// returned cursor will be generic over the `T` specified.
     #[deeplink]
     pub fn aggregate(&self, pipeline: impl IntoIterator<Item = Document>) -> Aggregate {
         self.async_database.aggregate(pipeline)
@@ -84,8 +90,10 @@ where
     /// See the documentation [here](https://www.mongodb.com/docs/manual/aggregation/) for more
     /// information on aggregations.
    ///
-    /// [`run`](Aggregate::run) will return d[`Result<crate::sync::Cursor<Document>>`] or
-    /// d[`Result<crate::sync::SessionCursor<Document>>`] if a `ClientSession` is provided.
+    /// [`run`](Aggregate::run) will return d[`Result<crate::sync::Cursor<Document>>`]. If a
+    /// `crate::sync::ClientSession` was provided, the returned cursor will be a
+    /// `crate::sync::SessionCursor`. If [`with_type`](Aggregate::with_type) was called, the
+    /// returned cursor will be generic over the `T` specified.
     #[deeplink]
     pub fn aggregate(&self, pipeline: impl IntoIterator<Item = Document>) -> Aggregate {
         self.async_collection.aggregate(pipeline)
@@ -95,14 +103,15 @@
 /// Run an aggregation operation. Construct with [`Database::aggregate`] or
 /// [`Collection::aggregate`].
 #[must_use]
-pub struct Aggregate<'a, Session = ImplicitSession> {
+pub struct Aggregate<'a, Session = ImplicitSession, T = Document> {
     target: AggregateTargetRef<'a>,
     pipeline: Vec<Document>,
     options: Option<AggregateOptions>,
     session: Session,
+    _phantom: PhantomData<T>,
 }
 
-impl<'a, Session> Aggregate<'a, Session> {
+impl<'a, Session, T> Aggregate<'a, Session, T> {
     option_setters!(options: AggregateOptions;
         allow_disk_use: bool,
         batch_size: u32,
@@ -130,15 +139,50 @@ impl<'a> Aggregate<'a, ImplicitSession> {
             pipeline: self.pipeline,
             options: self.options,
             session: ExplicitSession(value.into()),
+            _phantom: PhantomData,
         }
     }
 }
 
-#[action_impl(sync = crate::sync::Cursor<Document>)]
-impl<'a> Action for Aggregate<'a, ImplicitSession> {
+impl<'a, Session> Aggregate<'a, Session, Document> {
+    /// Use the provided type for the returned cursor.
+    ///
+    /// ```rust
+    /// # use futures_util::TryStreamExt;
+    /// # use mongodb::{bson::Document, error::Result, Cursor, Database};
+    /// # use serde::Deserialize;
+    /// # async fn run() -> Result<()> {
+    /// # let database: Database = todo!();
+    /// # let pipeline: Vec<Document> = todo!();
+    /// #[derive(Deserialize)]
+    /// struct PipelineOutput {
+    ///     len: usize,
+    /// }
+    ///
+    /// let aggregate_cursor = database
+    ///     .aggregate(pipeline)
+    ///     .with_type::<PipelineOutput>()
+    ///     .await?;
+    /// let aggregate_results: Vec<PipelineOutput> = aggregate_cursor.try_collect().await?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn with_type<T>(self) -> Aggregate<'a, Session, T> {
+        Aggregate {
+            target: self.target,
+            pipeline: self.pipeline,
+            options: self.options,
+            session: self.session,
+            _phantom: PhantomData,
+        }
+    }
+}
+
+#[action_impl(sync = crate::sync::Cursor<T>)]
+impl<'a, T> Action for Aggregate<'a, ImplicitSession, T> {
     type Future = AggregateFuture;
 
-    async fn execute(mut self) -> Result<Cursor<Document>> {
+    async fn execute(mut self) -> Result<Cursor<T>> {
         resolve_options!(
             self.target,
             self.options,
diff --git a/src/test/coll.rs b/src/test/coll.rs
index 696ccbe91..4d2fd6079 100644
--- a/src/test/coll.rs
+++ b/src/test/coll.rs
@@ -27,6 +27,7 @@ use crate::{
     results::DeleteResult,
     test::{get_client_options, log_uncaptured, util::TestClient, EventClient},
     Collection,
+    Cursor,
     IndexModel,
 };
@@ -1306,3 +1307,48 @@ async fn insert_many_document_sequences() {
     let second_batch_len = second_event.command.get_array("documents").unwrap().len();
     assert_eq!(first_batch_len + second_batch_len, total_docs);
 }
+
+#[tokio::test]
+async fn aggregate_with_generics() {
+    #[derive(Serialize)]
+    struct A {
+        str: String,
+    }
+
+    #[derive(Deserialize)]
+    struct B {
+        len: i32,
+    }
+
+    fn assert_document_cursor(_: Cursor<Document>) {}
+
+    let client = TestClient::new().await;
+    let collection = client
+        .database("aggregate_with_generics")
+        .collection::<A>("aggregate_with_generics");
+
+    let a = A {
+        str: "hi".to_string(),
+    };
+    let len = a.str.len();
+    collection.insert_one(&a).await.unwrap();
+
+    // Assert at compile-time that the default cursor returned is a Cursor
+    let basic_pipeline = vec![doc! { "$match": { "a": 1 } }];
+    let cursor = collection.aggregate(basic_pipeline.clone()).await.unwrap();
+    assert_document_cursor(cursor);
+
+    // Assert that data is properly deserialized when using with_type
+    let project_pipeline = vec![doc!
{ "$project": { + "str": 1, + "len": { "$strLenBytes": "$str" } + } + }]; + let cursor = collection + .aggregate(project_pipeline) + .with_type::() + .await + .unwrap(); + let lens: Vec = cursor.try_collect().await.unwrap(); + assert_eq!(lens[0].len as usize, len); +} diff --git a/src/test/db.rs b/src/test/db.rs index 9d0c7f4ca..cef9f9da5 100644 --- a/src/test/db.rs +++ b/src/test/db.rs @@ -1,6 +1,7 @@ use std::cmp::Ord; use futures::stream::TryStreamExt; +use serde::Deserialize; use crate::{ action::Action, @@ -17,6 +18,7 @@ use crate::{ results::{CollectionSpecification, CollectionType}, test::util::TestClient, Client, + Cursor, Database, }; @@ -378,3 +380,37 @@ async fn clustered_index_list_collections() { .unwrap(); assert!(clustered_index_collection.options.clustered_index.is_some()); } + +#[tokio::test] +async fn aggregate_with_generics() { + #[derive(Debug, Deserialize, PartialEq)] + struct A { + str: String, + } + + fn assert_document_cursor(_: Cursor) {} + + let client = TestClient::new().await; + let database = client.database("aggregate_with_generics"); + + // The cursor returned will contain these documents + let basic_pipeline = vec![doc! { "$documents": [ { "str": "hi" } ] }]; + + // Assert at compile-time that the default cursor returned is a Cursor + let cursor = database.aggregate(basic_pipeline.clone()).await.unwrap(); + assert_document_cursor(cursor); + + // Assert that data is properly deserialized when using with_type + let mut cursor = database + .aggregate(basic_pipeline) + .with_type::() + .await + .unwrap(); + assert!(cursor.advance().await.unwrap()); + assert_eq!( + cursor.deserialize_current().unwrap(), + A { + str: "hi".to_string() + } + ); +} From fac9ca1213c93091ac518e18d8233c48782ece81 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 10 May 2024 17:09:41 -0600 Subject: [PATCH 3/5] Revert "minor: bump clippy to 1.78.0" This reverts commit fbc6651f5d8889c965d318e2ae25fb2918be8cdf. --- .evergreen/check-clippy.sh | 4 ++-- src/client/auth/oidc.rs | 2 +- src/client/options/parse.rs | 2 +- src/lib.rs | 2 +- src/sdam/description/topology.rs | 2 +- src/sdam/srv_polling/test.rs | 4 ++-- src/test.rs | 2 +- src/test/spec/initial_dns_seedlist_discovery.rs | 4 +--- src/test/spec/retryable_reads.rs | 5 +++-- src/test/spec/retryable_writes.rs | 7 ++++--- src/test/spec/trace.rs | 10 +++++----- src/test/spec/unified_runner/operation.rs | 4 +--- src/test/spec/unified_runner/test_file.rs | 2 +- 13 files changed, 24 insertions(+), 26 deletions(-) diff --git a/.evergreen/check-clippy.sh b/.evergreen/check-clippy.sh index 3fb3911c2..7d507af3d 100755 --- a/.evergreen/check-clippy.sh +++ b/.evergreen/check-clippy.sh @@ -5,7 +5,7 @@ set -o errexit source ./.evergreen/env.sh # Pin clippy to the latest version. This should be updated when new versions of Rust are released. -CLIPPY_VERSION=1.78.0 +CLIPPY_VERSION=1.75.0 rustup install $CLIPPY_VERSION @@ -13,4 +13,4 @@ rustup install $CLIPPY_VERSION cargo +$CLIPPY_VERSION clippy --all-targets -p mongodb -- -D warnings # Check with all features. 
-cargo +$CLIPPY_VERSION clippy --all-targets --all-features -p mongodb -- -D warnings +cargo +$CLIPPY_VERSION clippy --all-targets --all-features -p mongodb -- -D warnings \ No newline at end of file diff --git a/src/client/auth/oidc.rs b/src/client/auth/oidc.rs index f8036128c..67ca01dca 100644 --- a/src/client/auth/oidc.rs +++ b/src/client/auth/oidc.rs @@ -279,7 +279,7 @@ impl Cache { self.idp_server_info = idp_server_info; } self.access_token = Some(response.access_token.clone()); - self.refresh_token.clone_from(&response.refresh_token); + self.refresh_token = response.refresh_token.clone(); self.last_call_time = Instant::now(); self.token_gen_id += 1; } diff --git a/src/client/options/parse.rs b/src/client/options/parse.rs index bf7b22bd9..c8542068d 100644 --- a/src/client/options/parse.rs +++ b/src/client/options/parse.rs @@ -21,7 +21,7 @@ impl Action for ParseConnectionString { let mut options = ClientOptions::from_connection_string(conn_str); #[cfg(feature = "dns-resolver")] { - options.resolver_config.clone_from(&self.resolver_config); + options.resolver_config = self.resolver_config.clone(); } let resolved = host_info.resolve(self.resolver_config).await?; diff --git a/src/lib.rs b/src/lib.rs index af563acbd..9cf9e7e96 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -4,7 +4,7 @@ #![warn(clippy::cast_possible_truncation)] #![warn(clippy::cast_possible_wrap)] #![cfg_attr( - feature = "clippy", + feature = "cargo-clippy", allow( clippy::unreadable_literal, clippy::cognitive_complexity, diff --git a/src/sdam/description/topology.rs b/src/sdam/description/topology.rs index 6d9dd89d1..2b4536ecd 100644 --- a/src/sdam/description/topology.rs +++ b/src/sdam/description/topology.rs @@ -175,7 +175,7 @@ impl TopologyDescription { } self.single_seed = self.servers.len() == 1; - self.set_name.clone_from(&options.repl_set_name); + self.set_name = options.repl_set_name.clone(); self.local_threshold = options.local_threshold; self.heartbeat_freq = options.heartbeat_freq; self.srv_max_hosts = options.srv_max_hosts; diff --git a/src/sdam/srv_polling/test.rs b/src/sdam/srv_polling/test.rs index 6b0310d7e..547dc299b 100644 --- a/src/sdam/srv_polling/test.rs +++ b/src/sdam/srv_polling/test.rs @@ -43,7 +43,7 @@ async fn run_test_extra( new_hosts: Result>, ) -> HashSet { let mut options = ClientOptions::new_srv(); - options.hosts.clone_from(&DEFAULT_HOSTS); + options.hosts = DEFAULT_HOSTS.clone(); options.test_options_mut().disable_monitoring_threads = true; options.srv_max_hosts = max_hosts; let mut topology = Topology::new(options.clone()).unwrap(); @@ -132,7 +132,7 @@ async fn load_balanced_no_srv_polling() { let hosts = vec![localhost_test_build_10gen(27017)]; let mut options = ClientOptions::new_srv(); let rescan_interval = options.original_srv_info.as_ref().cloned().unwrap().min_ttl; - options.hosts.clone_from(&hosts); + options.hosts = hosts.clone(); options.load_balanced = Some(true); options.test_options_mut().mock_lookup_hosts = Some(make_lookup_hosts(vec![ localhost_test_build_10gen(27017), diff --git a/src/test.rs b/src/test.rs index a0dea5362..e7dd6f47f 100644 --- a/src/test.rs +++ b/src/test.rs @@ -114,7 +114,7 @@ pub(crate) static DEFAULT_GLOBAL_TRACING_HANDLER: Lazy = Lazy::n pub(crate) fn update_options_for_testing(options: &mut ClientOptions) { if options.server_api.is_none() { - options.server_api.clone_from(&SERVER_API); + options.server_api = SERVER_API.clone(); } #[cfg(any( diff --git a/src/test/spec/initial_dns_seedlist_discovery.rs b/src/test/spec/initial_dns_seedlist_discovery.rs 
index 32b619c5e..944ebb918 100644 --- a/src/test/spec/initial_dns_seedlist_discovery.rs +++ b/src/test/spec/initial_dns_seedlist_discovery.rs @@ -135,9 +135,7 @@ async fn run_test(mut test_file: TestFile) { } else { let mut options_with_tls = options.clone(); if requires_tls { - options_with_tls - .tls - .clone_from(&get_client_options().await.tls); + options_with_tls.tls = get_client_options().await.tls.clone(); } let client = Client::with_options(options_with_tls).unwrap(); diff --git a/src/test/spec/retryable_reads.rs b/src/test/spec/retryable_reads.rs index e5f1a5422..edf4972d6 100644 --- a/src/test/spec/retryable_reads.rs +++ b/src/test/spec/retryable_reads.rs @@ -138,8 +138,9 @@ async fn retry_read_pool_cleared() { .expect("pool clear should occur"); let next_cmap_events = subscriber - .collect_events(Duration::from_millis(1000), |event| { - matches!(event, Event::Cmap(_)) + .collect_events(Duration::from_millis(1000), |event| match event { + Event::Cmap(_) => true, + _ => false, }) .await; diff --git a/src/test/spec/retryable_writes.rs b/src/test/spec/retryable_writes.rs index f71214fe6..6eb616f48 100644 --- a/src/test/spec/retryable_writes.rs +++ b/src/test/spec/retryable_writes.rs @@ -49,7 +49,7 @@ async fn run_legacy() { continue; } let mut options = test_case.client_options.unwrap_or_default(); - options.hosts.clone_from(&get_client_options().await.hosts); + options.hosts = get_client_options().await.hosts.clone(); if options.heartbeat_freq.is_none() { options.heartbeat_freq = Some(MIN_HEARTBEAT_FREQUENCY); } @@ -455,8 +455,9 @@ async fn retry_write_pool_cleared() { .expect("pool clear should occur"); let next_cmap_events = subscriber - .collect_events(Duration::from_millis(1000), |event| { - matches!(event, Event::Cmap(_)) + .collect_events(Duration::from_millis(1000), |event| match event { + Event::Cmap(_) => true, + _ => false, }) .await; diff --git a/src/test/spec/trace.rs b/src/test/spec/trace.rs index 2829967be..1a51cfd6e 100644 --- a/src/test/spec/trace.rs +++ b/src/test/spec/trace.rs @@ -49,27 +49,27 @@ fn tracing_truncation() { assert_eq!(s, String::from("...")); // we should "round up" to the end of the first emoji - s.clone_from(&two_emoji); + s = two_emoji.clone(); truncate_on_char_boundary(&mut s, 1); assert_eq!(s, String::from("🤔...")); // 4 is a boundary, so we should truncate there - s.clone_from(&two_emoji); + s = two_emoji.clone(); truncate_on_char_boundary(&mut s, 4); assert_eq!(s, String::from("🤔...")); // we should round up to the full string - s.clone_from(&two_emoji); + s = two_emoji.clone(); truncate_on_char_boundary(&mut s, 5); assert_eq!(s, two_emoji); // end of string is a boundary, so we should truncate there - s.clone_from(&two_emoji); + s = two_emoji.clone(); truncate_on_char_boundary(&mut s, 8); assert_eq!(s, two_emoji); // we should get the full string back if the new length is longer than the original - s.clone_from(&two_emoji); + s = two_emoji.clone(); truncate_on_char_boundary(&mut s, 10); assert_eq!(s, two_emoji); } diff --git a/src/test/spec/unified_runner/operation.rs b/src/test/spec/unified_runner/operation.rs index 9be1a8b7e..058f135f2 100644 --- a/src/test/spec/unified_runner/operation.rs +++ b/src/test/spec/unified_runner/operation.rs @@ -124,7 +124,6 @@ pub(crate) trait TestOperation: Debug + Send + Sync { /// If this operation specifies entities to create, returns those entities. Otherwise, /// returns None. 
- #[cfg(feature = "tracing-unstable")] fn test_file_entities(&self) -> Option<&Vec> { None } @@ -2308,7 +2307,7 @@ impl TestOperation for RenameCollection { let target = test_runner.get_collection(id).await; let ns = target.namespace(); let mut to_ns = ns.clone(); - to_ns.coll.clone_from(&self.to); + to_ns.coll = self.to.clone(); let cmd = doc! { "renameCollection": crate::bson::to_bson(&ns)?, "to": crate::bson::to_bson(&to_ns)?, @@ -2742,7 +2741,6 @@ impl TestOperation for CreateEntities { .boxed() } - #[cfg(feature = "tracing-unstable")] fn test_file_entities(&self) -> Option<&Vec> { Some(&self.entities) } diff --git a/src/test/spec/unified_runner/test_file.rs b/src/test/spec/unified_runner/test_file.rs index 6bb2b68b7..7292b55f8 100644 --- a/src/test/spec/unified_runner/test_file.rs +++ b/src/test/spec/unified_runner/test_file.rs @@ -500,7 +500,7 @@ impl ExpectError { let description = description.as_ref(); if let Some(is_client_error) = self.is_client_error { - if is_client_error == error.is_server_error() { + if is_client_error != !error.is_server_error() { return Err(format!( "{}: expected client error but got {:?}", description, error From d8e33e33e5e6c0d6f69a2b08feece14d0328de8d Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 10 May 2024 17:31:46 -0600 Subject: [PATCH 4/5] skip test --- src/test/db.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/test/db.rs b/src/test/db.rs index cef9f9da5..72db2456a 100644 --- a/src/test/db.rs +++ b/src/test/db.rs @@ -393,6 +393,13 @@ async fn aggregate_with_generics() { let client = TestClient::new().await; let database = client.database("aggregate_with_generics"); + if client.server_version_lt(5, 1) { + log_uncaptured( + "skipping aggregate_with_generics: $documents agg stage only available on 5.1+", + ); + return; + } + // The cursor returned will contain these documents let basic_pipeline = vec![doc! { "$documents": [ { "str": "hi" } ] }]; From 7972cfcbd00f8a7f678856698a05590d0a9d7ca2 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Mon, 13 May 2024 08:36:58 -0600 Subject: [PATCH 5/5] test cleanup --- src/test/coll.rs | 5 +---- src/test/db.rs | 22 +++++----------------- 2 files changed, 6 insertions(+), 21 deletions(-) diff --git a/src/test/coll.rs b/src/test/coll.rs index 4d2fd6079..23c1350ca 100644 --- a/src/test/coll.rs +++ b/src/test/coll.rs @@ -1320,8 +1320,6 @@ async fn aggregate_with_generics() { len: i32, } - fn assert_document_cursor(_: Cursor) {} - let client = TestClient::new().await; let collection = client .database("aggregate_with_generics") @@ -1335,8 +1333,7 @@ async fn aggregate_with_generics() { // Assert at compile-time that the default cursor returned is a Cursor let basic_pipeline = vec![doc! { "$match": { "a": 1 } }]; - let cursor = collection.aggregate(basic_pipeline.clone()).await.unwrap(); - assert_document_cursor(cursor); + let _: Cursor = collection.aggregate(basic_pipeline).await.unwrap(); // Assert that data is properly deserialized when using with_type let project_pipeline = vec![doc! 
{ "$project": { diff --git a/src/test/db.rs b/src/test/db.rs index 72db2456a..46b6d1088 100644 --- a/src/test/db.rs +++ b/src/test/db.rs @@ -383,13 +383,11 @@ async fn clustered_index_list_collections() { #[tokio::test] async fn aggregate_with_generics() { - #[derive(Debug, Deserialize, PartialEq)] + #[derive(Deserialize)] struct A { str: String, } - fn assert_document_cursor(_: Cursor) {} - let client = TestClient::new().await; let database = client.database("aggregate_with_generics"); @@ -401,23 +399,13 @@ async fn aggregate_with_generics() { } // The cursor returned will contain these documents - let basic_pipeline = vec![doc! { "$documents": [ { "str": "hi" } ] }]; + let pipeline = vec![doc! { "$documents": [ { "str": "hi" } ] }]; // Assert at compile-time that the default cursor returned is a Cursor - let cursor = database.aggregate(basic_pipeline.clone()).await.unwrap(); - assert_document_cursor(cursor); + let _: Cursor = database.aggregate(pipeline.clone()).await.unwrap(); // Assert that data is properly deserialized when using with_type - let mut cursor = database - .aggregate(basic_pipeline) - .with_type::() - .await - .unwrap(); + let mut cursor = database.aggregate(pipeline).with_type::().await.unwrap(); assert!(cursor.advance().await.unwrap()); - assert_eq!( - cursor.deserialize_current().unwrap(), - A { - str: "hi".to_string() - } - ); + assert_eq!(&cursor.deserialize_current().unwrap().str, "hi"); }