diff --git a/core/src/main/java/com/arangodb/ArangoDatabase.java b/core/src/main/java/com/arangodb/ArangoDatabase.java
index 9ef0327a5..8bef480f7 100644
--- a/core/src/main/java/com/arangodb/ArangoDatabase.java
+++ b/core/src/main/java/com/arangodb/ArangoDatabase.java
@@ -69,8 +69,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
* Returns the name of the used storage engine.
*
* @return the storage engine name
- * @see
- * API
+ * @see API
* Documentation
*/
ArangoDBEngine getEngine();
@@ -79,8 +78,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
* Checks whether the database exists
*
* @return true if the database exists, otherwise false
- * @see
- * API
+ * @see API
* Documentation
*/
boolean exists();
@@ -149,8 +147,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
*
* @param id The index-handle
* @return information about the index
- * @see
- * API Documentation
+ * @see API Documentation
*/
IndexEntity getIndex(String id);
@@ -159,8 +156,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
*
* @param id The index-handle
* @return the id of the index
- * @see
- * API Documentation
+ * @see API Documentation
*/
String deleteIndex(String id);
@@ -229,9 +225,9 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
*
* @param user The name of the user
* @param permissions The permissions the user grant
- * @since ArangoDB 3.2.0
* @see
* API Documentation
+ * @since ArangoDB 3.2.0
*/
void grantDefaultCollectionAccess(String user, Permissions permissions);
@@ -255,8 +251,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
* @param bindVars key/value pairs defining the variables to bind the query to
* @param options Additional options that will be passed to the query API, can be null
* @return cursor of the results
- * @see
- * API
+ * @see API
* Documentation
*/
ArangoCursor query(String query, Class type, Map bindVars, AqlQueryOptions options);
@@ -269,8 +264,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
* @param type The type of the result (POJO or {@link com.arangodb.util.RawData})
* @param options Additional options that will be passed to the query API, can be null
* @return cursor of the results
- * @see
- * API
+ * @see API
* Documentation
*/
ArangoCursor query(String query, Class type, AqlQueryOptions options);
@@ -283,8 +277,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
* @param type The type of the result (POJO or {@link com.arangodb.util.RawData})
* @param bindVars key/value pairs defining the variables to bind the query to
* @return cursor of the results
- * @see
- * API
+ * @see API
* Documentation
*/
ArangoCursor query(String query, Class type, Map bindVars);
@@ -296,8 +289,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
* @param query An AQL query string
* @param type The type of the result (POJO or {@link com.arangodb.util.RawData})
* @return cursor of the results
- * @see
- * API
+ * @see API
* Documentation
*/
ArangoCursor query(String query, Class type);
@@ -378,9 +370,25 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
* @return information about the query
* @see API
* Documentation
+ *
+ * @deprecated for removal, use {@link ArangoDatabase#explainAqlQuery(String, Map, ExplainAqlQueryOptions)} instead
*/
+ @Deprecated
AqlQueryExplainEntity explainAqlQuery(String query, Map bindVars, AqlQueryExplainOptions options);
+
+ /**
+ * Explain an AQL query and return information about it
+ *
+ * @param query the query which you want explained
+ * @param bindVars key/value pairs representing the bind parameters
+ * @param options Additional options, can be null
+ * @return information about the query
+ * @see API
+ * Documentation
+ */
+ AqlQueryExplainEntity explainAqlQuery(String query, Map bindVars, ExplainAqlQueryOptions options);
+
/**
* Parse an AQL query and return information about it This method is for query validation only. To actually query
* the database, see {@link ArangoDatabase#query(String, Class, Map, AqlQueryOptions)}
@@ -575,8 +583,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
*
* @param options Additional options, can be null
* @return information about the transaction
- * @see
- * API
+ * @see API
* Documentation
* @since ArangoDB 3.5.0
*/
@@ -586,8 +593,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
* Aborts a Stream Transaction.
*
* @return information about the transaction
- * @see
- * API
+ * @see API
* Documentation
*/
StreamTransactionEntity abortStreamTransaction(String id);
@@ -596,8 +602,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
* Gets information about a Stream Transaction.
*
* @return information about the transaction
- * @see
- *
+ * @see
* API Documentation
* @since ArangoDB 3.5.0
*/
@@ -607,8 +612,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
* Gets all the currently running Stream Transactions.
*
* @return all the currently running Stream Transactions
- * @see
- *
+ * @see
* API Documentation
* @since ArangoDB 3.5.0
*/
@@ -618,8 +622,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
* Commits a Stream Transaction.
*
* @return information about the transaction
- * @see
- *
+ * @see
* API Documentation
* @since ArangoDB 3.5.0
*/
@@ -648,8 +651,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor {
* Fetches all views from the database and returns a list of view descriptions.
*
* @return list of information about all views
- * @see
- * API Documentation
+ * @see API Documentation
* @since ArangoDB 3.4.0
*/
Collection getViews();
diff --git a/core/src/main/java/com/arangodb/ArangoDatabaseAsync.java b/core/src/main/java/com/arangodb/ArangoDatabaseAsync.java
index 9ab7fb0c2..d7a56d79a 100644
--- a/core/src/main/java/com/arangodb/ArangoDatabaseAsync.java
+++ b/core/src/main/java/com/arangodb/ArangoDatabaseAsync.java
@@ -172,9 +172,17 @@ public interface ArangoDatabaseAsync extends ArangoSerdeAccessor {
/**
* Asynchronous version of {@link ArangoDatabase#explainAqlQuery(String, Map, AqlQueryExplainOptions)}
+ *
+ * @deprecated for removal, use {@link ArangoDatabaseAsync#explainAqlQuery(String, Map, ExplainAqlQueryOptions)} instead
*/
+ @Deprecated
CompletableFuture explainAqlQuery(String query, Map bindVars, AqlQueryExplainOptions options);
+ /**
+ * Asynchronous version of {@link ArangoDatabase#explainAqlQuery(String, Map, ExplainAqlQueryOptions)}
+ */
+ CompletableFuture explainAqlQuery(String query, Map bindVars, ExplainAqlQueryOptions options);
+
/**
* Asynchronous version of {@link ArangoDatabase#parseQuery(String)}
*/
diff --git a/core/src/main/java/com/arangodb/entity/AqlExecutionExplainEntity.java b/core/src/main/java/com/arangodb/entity/AqlExecutionExplainEntity.java
index 1b2dece59..6f6448b14 100644
--- a/core/src/main/java/com/arangodb/entity/AqlExecutionExplainEntity.java
+++ b/core/src/main/java/com/arangodb/entity/AqlExecutionExplainEntity.java
@@ -20,11 +20,17 @@
package com.arangodb.entity;
+import com.arangodb.ArangoDatabase;
+import com.arangodb.model.ExplainAqlQueryOptions;
+
import java.util.Collection;
+import java.util.Map;
/**
* @author Mark Vollmary
+ * @deprecated for removal, use {@link ArangoDatabase#explainAqlQuery(String, Map, ExplainAqlQueryOptions)} instead
*/
+@Deprecated
public final class AqlExecutionExplainEntity {
private ExecutionPlan plan;
diff --git a/core/src/main/java/com/arangodb/internal/ArangoDatabaseAsyncImpl.java b/core/src/main/java/com/arangodb/internal/ArangoDatabaseAsyncImpl.java
index 490333c5d..698335094 100644
--- a/core/src/main/java/com/arangodb/internal/ArangoDatabaseAsyncImpl.java
+++ b/core/src/main/java/com/arangodb/internal/ArangoDatabaseAsyncImpl.java
@@ -229,6 +229,11 @@ public CompletableFuture explainAqlQuery(
return executorAsync().execute(() -> explainQueryRequest(query, bindVars, options), AqlQueryExplainEntity.class);
}
+ @Override
+ public CompletableFuture explainAqlQuery(String query, Map bindVars, ExplainAqlQueryOptions options) {
+ return executorAsync().execute(() -> explainQueryRequest(query, bindVars, options), AqlQueryExplainEntity.class);
+ }
+
@Override
public CompletableFuture parseQuery(final String query) {
return executorAsync().execute(() -> parseQueryRequest(query), AqlParseEntity.class);
diff --git a/core/src/main/java/com/arangodb/internal/ArangoDatabaseImpl.java b/core/src/main/java/com/arangodb/internal/ArangoDatabaseImpl.java
index 3129d06aa..5a652467d 100644
--- a/core/src/main/java/com/arangodb/internal/ArangoDatabaseImpl.java
+++ b/core/src/main/java/com/arangodb/internal/ArangoDatabaseImpl.java
@@ -249,6 +249,11 @@ public AqlQueryExplainEntity explainAqlQuery(String query, Map b
return executorSync().execute(explainQueryRequest(query, bindVars, options), AqlQueryExplainEntity.class);
}
+ @Override
+ public AqlQueryExplainEntity explainAqlQuery(String query, Map bindVars, ExplainAqlQueryOptions options) {
+ return executorSync().execute(explainQueryRequest(query, bindVars, options), AqlQueryExplainEntity.class);
+ }
+
@Override
public AqlParseEntity parseQuery(final String query) {
return executorSync().execute(parseQueryRequest(query), AqlParseEntity.class);
diff --git a/core/src/main/java/com/arangodb/internal/InternalArangoDatabase.java b/core/src/main/java/com/arangodb/internal/InternalArangoDatabase.java
index 32a126f11..3fe3911aa 100644
--- a/core/src/main/java/com/arangodb/internal/InternalArangoDatabase.java
+++ b/core/src/main/java/com/arangodb/internal/InternalArangoDatabase.java
@@ -179,6 +179,13 @@ protected InternalRequest explainQueryRequest(final String query, final Map bindVars,
+ final ExplainAqlQueryOptions options) {
+ final ExplainAqlQueryOptions opt = options != null ? options : new ExplainAqlQueryOptions();
+ return request(name, RequestType.POST, PATH_API_EXPLAIN)
+ .setBody(getSerde().serialize(OptionsBuilder.build(opt, query, bindVars)));
+ }
+
protected InternalRequest parseQueryRequest(final String query) {
return request(name, RequestType.POST, PATH_API_QUERY).setBody(getSerde().serialize(OptionsBuilder.build(new AqlQueryParseOptions(), query)));
}
diff --git a/core/src/main/java/com/arangodb/model/AqlQueryExplainOptions.java b/core/src/main/java/com/arangodb/model/AqlQueryExplainOptions.java
index a5e4d6a79..80862ae4f 100644
--- a/core/src/main/java/com/arangodb/model/AqlQueryExplainOptions.java
+++ b/core/src/main/java/com/arangodb/model/AqlQueryExplainOptions.java
@@ -20,6 +20,7 @@
package com.arangodb.model;
+import com.arangodb.ArangoDatabase;
import com.arangodb.internal.serde.UserDataInside;
import java.util.Collection;
@@ -28,7 +29,10 @@
/**
* @author Mark Vollmary
* @author Michele Rastelli
+ *
+ * @deprecated for removal, use {@link ArangoDatabase#explainAqlQuery(String, Map, ExplainAqlQueryOptions)} instead
*/
+@Deprecated
public final class AqlQueryExplainOptions {
private Map bindVars;
diff --git a/core/src/main/java/com/arangodb/model/AqlQueryOptions.java b/core/src/main/java/com/arangodb/model/AqlQueryOptions.java
index 91c551d14..f51f419b2 100644
--- a/core/src/main/java/com/arangodb/model/AqlQueryOptions.java
+++ b/core/src/main/java/com/arangodb/model/AqlQueryOptions.java
@@ -20,7 +20,11 @@
package com.arangodb.model;
+import com.arangodb.ArangoCursor;
import com.arangodb.internal.serde.UserDataInside;
+import com.fasterxml.jackson.annotation.JsonAnyGetter;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonInclude;
import java.util.*;
@@ -30,49 +34,40 @@
*/
public final class AqlQueryOptions extends TransactionalOptions implements Cloneable {
- private Boolean count;
- private Integer ttl;
+ private Boolean allowDirtyRead;
private Integer batchSize;
+ private Map bindVars;
private Boolean cache;
+ private Boolean count;
private Long memoryLimit;
- private Map bindVars;
- private String query;
private Options options;
- private Boolean allowDirtyRead;
+ private String query;
+ private Integer ttl;
@Override
AqlQueryOptions getThis() {
return this;
}
- public Boolean getCount() {
- return count;
- }
-
- /**
- * @param count indicates whether the number of documents in the result set should be returned in the "count"
- * attribute of the result. Calculating the "count" attribute might have a performance impact for some
- * queries in the future so this option is turned off by default, and "count" is only returned when
- * requested.
- * @return options
- */
- public AqlQueryOptions count(final Boolean count) {
- this.count = count;
- return this;
- }
-
- public Integer getTtl() {
- return ttl;
+ public Boolean getAllowDirtyRead() {
+ return allowDirtyRead;
}
/**
- * @param ttl The time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically
- * after the specified amount of time. This is useful to ensure garbage collection of cursors that are
- * not fully fetched by clients. If not set, a server-defined value will be used.
- * @return options
+ * Sets the header {@code x-arango-allow-dirty-read} to {@code true} to allow the Coordinator to ask any shard
+     * replica for the data, not only the shard leader. This may result in "dirty reads".
+ *
+ * The header is ignored if this operation is part of a Stream Transaction
+ * ({@link AqlQueryOptions#streamTransactionId(String)}). The header set when creating the transaction decides
+ * about dirty reads for the entire transaction, not the individual read operations.
+ *
+ * @param allowDirtyRead Set to {@code true} allows reading from followers in an active-failover setup.
+ * @return this
+ * @see API
+ * Documentation
*/
- public AqlQueryOptions ttl(final Integer ttl) {
- this.ttl = ttl;
+ public AqlQueryOptions allowDirtyRead(final Boolean allowDirtyRead) {
+ this.allowDirtyRead = allowDirtyRead;
return this;
}
@@ -82,32 +77,28 @@ public Integer getBatchSize() {
/**
* @param batchSize maximum number of result documents to be transferred from the server to the client in one
- * roundtrip.
- * If this attribute is not set, a server-controlled default value will be used. A batchSize
- * value of 0
- * is disallowed.
- * @return options
+ * roundtrip. If this attribute is not set, a server-controlled default value will be used.
+ * A batchSize value of 0 is disallowed.
+ * @return this
*/
public AqlQueryOptions batchSize(final Integer batchSize) {
this.batchSize = batchSize;
return this;
}
- public Long getMemoryLimit() {
- return memoryLimit;
+ @UserDataInside
+ public Map getBindVars() {
+ return bindVars;
}
/**
- * @param memoryLimit the maximum number of memory (measured in bytes) that the query is allowed to use. If set,
- * then the
- * query will fail with error "resource limit exceeded" in case it allocates too much memory.
- * A value of
- * 0 indicates that there is no memory limit.
- * @return options
- * @since ArangoDB 3.1.0
+ * @param bindVars A map with key/value pairs representing the bind parameters. For a bind variable {@code @var} in
+ * the query, specify the value using an attribute with the name {@code var}. For a collection bind
+ * variable {@code @@coll}, use {@code @coll} as the attribute name.
+ * @return this
*/
- public AqlQueryOptions memoryLimit(final Long memoryLimit) {
- this.memoryLimit = memoryLimit;
+ AqlQueryOptions bindVars(final Map bindVars) {
+ this.bindVars = bindVars;
return this;
}
@@ -116,497 +107,350 @@ public Boolean getCache() {
}
/**
- * @param cache flag to determine whether the AQL query cache shall be used. If set to false, then any query cache
- * lookup will be skipped for the query. If set to true, it will lead to the query cache being checked
- * for the query if the query cache mode is either on or demand.
- * @return options
+ * @param cache flag to determine whether the AQL query results cache shall be used. If set to false, then any
+ * query cache lookup will be skipped for the query. If set to true, it will lead to the query cache
+ * being checked for the query if the query cache mode is either on or demand.
+ * @return this
*/
public AqlQueryOptions cache(final Boolean cache) {
this.cache = cache;
return this;
}
- public Boolean getFillBlockCache() {
- return getOptions().fillBlockCache;
+ public Boolean getCount() {
+ return count;
}
/**
- * @param fillBlockCache if set to true
or not specified, this will make the query store
- * the data it reads via the RocksDB storage engine in the RocksDB block cache. This is
- * usually the desired behavior. The option can be set to false
for queries that
- * are known to either read a lot of data that would thrash the block cache, or for queries
- * that read data known to be outside of the hot set. By setting the option
- * to false
, data read by the query will not make it into the RocksDB block
- * cache if
- * it is not already in there, thus leaving more room for the actual hot set.
- * @return options
- * @since ArangoDB 3.8.1
+ * @param count indicates whether the number of documents in the result set should be returned and made accessible
+ * via {@link ArangoCursor#getCount()}. Calculating the {@code count} attribute might have a
+ * performance impact for some queries in the future so this option is turned off by default, and
+ * {@code count} is only returned when requested.
+ * @return this
*/
- public AqlQueryOptions fillBlockCache(final Boolean fillBlockCache) {
- getOptions().fillBlockCache = fillBlockCache;
+ public AqlQueryOptions count(final Boolean count) {
+ this.count = count;
return this;
}
- @UserDataInside
- public Map getBindVars() {
- return bindVars;
+ public Long getMemoryLimit() {
+ return memoryLimit;
}
/**
- * @param bindVarsBytes serialized bind parameters
- * @return options
+     * @param memoryLimit the maximum amount of memory (measured in bytes) that the query is allowed to use. If set,
+ * then the query will fail with error {@code resource limit exceeded} in case it allocates too
+ * much memory. A value of {@code 0} indicates that there is no memory limit.
+ * @return this
+ * @since ArangoDB 3.1.0
*/
- AqlQueryOptions bindVars(final Map bindVarsBytes) {
- this.bindVars = bindVarsBytes;
+ public AqlQueryOptions memoryLimit(final Long memoryLimit) {
+ this.memoryLimit = memoryLimit;
return this;
}
- public String getQuery() {
- return query;
+ public Options getOptions() {
+ if (options == null) {
+ options = new Options();
+ }
+ return options;
}
/**
- * @param query the query which you want parse
- * @return options
+ * @param options extra options for the query
+ * @return this
*/
- public AqlQueryOptions query(final String query) {
- this.query = query;
+ public AqlQueryOptions options(final Options options) {
+ this.options = options;
return this;
}
- public Boolean getFailOnWarning() {
- return getOptions().failOnWarning;
- }
-
- /**
- * @param failOnWarning When set to true, the query will throw an exception and abort instead of producing a
- * warning. This
- * option should be used during development to catch potential issues early. When the
- * attribute is set to
- * false, warnings will not be propagated to exceptions and will be returned with the query
- * result. There
- * is also a server configuration option --query.fail-on-warning for setting the default
- * value for
- * failOnWarning so it does not need to be set on a per-query level.
- * @return options
- */
- public AqlQueryOptions failOnWarning(final Boolean failOnWarning) {
- getOptions().failOnWarning = failOnWarning;
- return this;
+ public String getQuery() {
+ return query;
}
/**
- * @param timeout The query has to be executed within the given runtime or it will be killed. The value is specified
- * in seconds. The default value is 0.0 (no timeout).
- * @return options
+ * @param query the query to be executed
+ * @return this
*/
- public AqlQueryOptions maxRuntime(final Double timeout) {
- getOptions().maxRuntime = timeout;
+ public AqlQueryOptions query(final String query) {
+ this.query = query;
return this;
}
- /**
- * @return If set to true, then the additional query profiling information will be returned in the sub-attribute
- * profile of the extra return attribute if the query result is not served from the query cache.
- */
- public Boolean getProfile() {
- return getOptions().profile;
+ public Integer getTtl() {
+ return ttl;
}
/**
- * @param profile If set to true, then the additional query profiling information will be returned in the
- * sub-attribute
- * profile of the extra return attribute if the query result is not served from the query cache.
- * @return options
+ * @param ttl The time-to-live for the cursor (in seconds). If the result set is small enough (less than or equal
+ * to batchSize) then results are returned right away. Otherwise, they are stored in memory and will be
+ * accessible via the cursor with respect to the ttl. The cursor will be removed on the server
+ * automatically after the specified amount of time. This is useful to ensure garbage collection of
+ * cursors that are not fully fetched by clients.
+ *
+ * If not set, a server-defined value will be used (default: 30 seconds).
+ *
+ * The time-to-live is renewed upon every access to the cursor.
+ * @return this
*/
- public AqlQueryOptions profile(final Boolean profile) {
- getOptions().profile = profile;
+ public AqlQueryOptions ttl(final Integer ttl) {
+ this.ttl = ttl;
return this;
}
- public Long getMaxTransactionSize() {
- return getOptions().maxTransactionSize;
+ @Override
+ public AqlQueryOptions clone() {
+ try {
+ AqlQueryOptions clone = (AqlQueryOptions) super.clone();
+ clone.bindVars = bindVars != null ? new HashMap<>(bindVars) : null;
+ clone.options = options != null ? options.clone() : null;
+ return clone;
+ } catch (CloneNotSupportedException e) {
+ throw new AssertionError();
+ }
}
- /**
- * @param maxTransactionSize Transaction size limit in bytes. Honored by the RocksDB storage engine only.
- * @return options
- * @since ArangoDB 3.2.0
- */
- public AqlQueryOptions maxTransactionSize(final Long maxTransactionSize) {
- getOptions().maxTransactionSize = maxTransactionSize;
- return this;
- }
+ public static final class Options implements Cloneable {
+ private Map customOptions;
+ private Boolean allPlans;
+ private Boolean allowDirtyReads;
+ private Boolean allowRetry;
+ private Boolean failOnWarning;
+ private Boolean fillBlockCache;
+ private String forceOneShardAttributeValue;
+ private Boolean fullCount;
+ private Long intermediateCommitCount;
+ private Long intermediateCommitSize;
+ private Integer maxDNFConditionMembers;
+ private Integer maxNodesPerCallstack;
+ private Integer maxNumberOfPlans;
+ private Double maxRuntime;
+ private Long maxTransactionSize;
+ private Long maxWarningCount;
+ private Optimizer optimizer;
+ private Boolean profile;
+ private Double satelliteSyncWait;
+ private Collection shardIds;
+ private Boolean skipInaccessibleCollections;
+ private Long spillOverThresholdMemoryUsage;
+ private Long spillOverThresholdNumRows;
+ private Boolean stream;
- public Long getMaxWarningCount() {
- return getOptions().maxWarningCount;
- }
+ @JsonInclude
+ @JsonAnyGetter
+ public Map getCustomOptions() {
+ if (customOptions == null) {
+ customOptions = new HashMap<>();
+ }
+ return customOptions;
+ }
- /**
- * @param maxWarningCount Limits the maximum number of warnings a query will return. The number of warnings a
- * query will return
- * is limited to 10 by default, but that number can be increased or decreased by setting
- * this attribute.
- * @return options
- * @since ArangoDB 3.2.0
- */
- public AqlQueryOptions maxWarningCount(final Long maxWarningCount) {
- getOptions().maxWarningCount = maxWarningCount;
- return this;
- }
+ public void setCustomOption(String key, Object value) {
+ getCustomOptions().put(key, value);
+ }
- public Long getIntermediateCommitCount() {
- return getOptions().intermediateCommitCount;
- }
+ public Boolean getAllPlans() {
+ return allPlans;
+ }
- /**
- * @param intermediateCommitCount Maximum number of operations after which an intermediate commit is performed
- * automatically. Honored by
- * the RocksDB storage engine only.
- * @return options
- * @since ArangoDB 3.2.0
- */
- public AqlQueryOptions intermediateCommitCount(final Long intermediateCommitCount) {
- getOptions().intermediateCommitCount = intermediateCommitCount;
- return this;
- }
+ public Boolean getAllowDirtyReads() {
+ return allowDirtyReads;
+ }
- public Long getIntermediateCommitSize() {
- return getOptions().intermediateCommitSize;
- }
+ public Boolean getAllowRetry() {
+ return allowRetry;
+ }
- /**
- * @param intermediateCommitSize Maximum total size of operations after which an intermediate commit is performed
- * automatically.
- * Honored by the RocksDB storage engine only.
- * @return options
- * @since ArangoDB 3.2.0
- */
- public AqlQueryOptions intermediateCommitSize(final Long intermediateCommitSize) {
- getOptions().intermediateCommitSize = intermediateCommitSize;
- return this;
- }
+ public Boolean getFailOnWarning() {
+ return failOnWarning;
+ }
- public Double getSatelliteSyncWait() {
- return getOptions().satelliteSyncWait;
- }
+ public Boolean getFillBlockCache() {
+ return fillBlockCache;
+ }
- /**
- * @param satelliteSyncWait This enterprise parameter allows to configure how long a DBServer will have time to
- * bring the
- * satellite collections involved in the query into sync. The default value is 60.0
- * (seconds). When the
- * max time has been reached the query will be stopped.
- * @return options
- * @since ArangoDB 3.2.0
- */
- public AqlQueryOptions satelliteSyncWait(final Double satelliteSyncWait) {
- getOptions().satelliteSyncWait = satelliteSyncWait;
- return this;
- }
+ public String getForceOneShardAttributeValue() {
+ return forceOneShardAttributeValue;
+ }
- public Boolean getSkipInaccessibleCollections() {
- return getOptions().skipInaccessibleCollections;
- }
+ public Boolean getFullCount() {
+ return fullCount;
+ }
- /**
- * @param skipInaccessibleCollections AQL queries (especially graph traversals) will treat collection to which a
- * user has no access rights
- * as if these collections were empty. Instead of returning a forbidden access
- * error, your queries will
- * execute normally. This is intended to help with certain use-cases: A graph
- * contains several
- * collections and different users execute AQL queries on that graph. You can
- * now naturally limit the
- * accessible results by changing the access rights of users on collections.
- * This feature is only
- * available in the Enterprise Edition.
- * @return options
- * @since ArangoDB 3.2.0
- */
- public AqlQueryOptions skipInaccessibleCollections(final Boolean skipInaccessibleCollections) {
- getOptions().skipInaccessibleCollections = skipInaccessibleCollections;
- return this;
- }
+ public Long getIntermediateCommitCount() {
+ return intermediateCommitCount;
+ }
- public Boolean getFullCount() {
- return getOptions().fullCount;
- }
+ public Long getIntermediateCommitSize() {
+ return intermediateCommitSize;
+ }
- /**
- * @param fullCount if set to true and the query contains a LIMIT clause, then the result will have an extra
- * attribute
- * with the sub-attributes stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 }
- * } }. The
- * fullCount attribute will contain the number of documents in the result before the last LIMIT
- * in the
- * query was applied. It can be used to count the number of documents that match certain filter
- * criteria,
- * but only return a subset of them, in one go. It is thus similar to MySQL's
- * SQL_CALC_FOUND_ROWS hint.
- * Note that setting the option will disable a few LIMIT optimizations and may lead to more
- * documents
- * being processed, and thus make queries run longer. Note that the fullCount attribute will
- * only be
- * present in the result if the query has a LIMIT clause and the LIMIT clause is actually used
- * in the
- * query.
- * @return options
- */
- public AqlQueryOptions fullCount(final Boolean fullCount) {
- getOptions().fullCount = fullCount;
- return this;
- }
+ public Integer getMaxDNFConditionMembers() {
+ return maxDNFConditionMembers;
+ }
- public Integer getMaxPlans() {
- return getOptions().maxPlans;
- }
+ public Integer getMaxNodesPerCallstack() {
+ return maxNodesPerCallstack;
+ }
- /**
- * @param maxPlans Limits the maximum number of plans that are created by the AQL query optimizer.
- * @return options
- */
- public AqlQueryOptions maxPlans(final Integer maxPlans) {
- getOptions().maxPlans = maxPlans;
- return this;
- }
+ public Integer getMaxNumberOfPlans() {
+ return maxNumberOfPlans;
+ }
- public Collection getRules() {
- return getOptions().getOptimizer().rules;
- }
+ /**
+ * @deprecated for removal, use {@link Options#getMaxNumberOfPlans()} instead
+ */
+ @Deprecated
+ @JsonIgnore
+ public Integer getMaxPlans() {
+ return getMaxNumberOfPlans();
+ }
- /**
- * @param rules A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute,
- * telling the
- * optimizer to include or exclude specific rules. To disable a rule, prefix its name with a -, to
- * enable
- * a rule, prefix it with a +. There is also a pseudo-rule all, which will match all optimizer rules
- * @return options
- */
- public AqlQueryOptions rules(final Collection rules) {
- getOptions().getOptimizer().rules = rules;
- return this;
- }
+ public Double getMaxRuntime() {
+ return maxRuntime;
+ }
- public Boolean getStream() {
- return getOptions().stream;
- }
+ public Long getMaxTransactionSize() {
+ return maxTransactionSize;
+ }
- /**
- * @param stream Specify true and the query will be executed in a streaming fashion. The query result is not
- * stored on
- * the server, but calculated on the fly. Beware: long-running queries will need to hold the
- * collection
- * locks for as long as the query cursor exists. When set to false a query will be executed right
- * away in
- * its entirety. In that case query results are either returned right away (if the resultset is small
- * enough), or stored on the arangod instance and accessible via the cursor API (with respect to the
- * ttl). It is advisable to only use this option on short-running queries or without exclusive locks
- * (write-locks on MMFiles). Please note that the query options cache, count and fullCount will not
- * work
- * on streaming queries. Additionally query statistics, warnings and profiling data will only be
- * available after the query is finished. The default value is false
- * @return options
- * @since ArangoDB 3.4.0
- */
- public AqlQueryOptions stream(final Boolean stream) {
- getOptions().stream = stream;
- return this;
- }
+ public Long getMaxWarningCount() {
+ return maxWarningCount;
+ }
- public Collection getShardIds() {
- return getOptions().shardIds;
- }
+ public Optimizer getOptimizer() {
+ if (optimizer == null) {
+ optimizer = new Optimizer();
+ }
+ return optimizer;
+ }
- /**
- * Restrict query to shards by given ids. This is an internal option. Use at your own risk.
- *
- * @param shardIds
- * @return options
- */
- public AqlQueryOptions shardIds(final String... shardIds) {
- getOptions().shardIds = Arrays.asList(shardIds);
- return this;
- }
+ public Boolean getProfile() {
+ return profile;
+ }
- public String getForceOneShardAttributeValue() {
- return options != null ? options.forceOneShardAttributeValue : null;
- }
+ public Double getSatelliteSyncWait() {
+ return satelliteSyncWait;
+ }
- /**
- * @param forceOneShardAttributeValue This query option can be used in complex queries in case the query optimizer
- * cannot automatically detect that the query can be limited to only a single
- * server (e.g. in a disjoint smart graph case).
- *
- * If the option is set incorrectly, i.e. to a wrong shard key value, then the
- * query may be shipped to a wrong DB server and may not return results (i.e.
- * empty result set).
- *
- * Use at your own risk.
- * @return options
- */
- public AqlQueryOptions forceOneShardAttributeValue(final String forceOneShardAttributeValue) {
- getOptions().forceOneShardAttributeValue = forceOneShardAttributeValue;
- return this;
- }
+ public Collection getShardIds() {
+ return shardIds;
+ }
- public Options getOptions() {
- if (options == null) {
- options = new Options();
+ public Boolean getSkipInaccessibleCollections() {
+ return skipInaccessibleCollections;
}
- return options;
- }
- /**
- * @param allowDirtyRead Set to {@code true} allows reading from followers in an active-failover setup.
- * @return options
- * @see API
- * Documentation
- * @since ArangoDB 3.4.0
- */
- public AqlQueryOptions allowDirtyRead(final Boolean allowDirtyRead) {
- this.allowDirtyRead = allowDirtyRead;
- return this;
- }
+ public Long getSpillOverThresholdMemoryUsage() {
+ return spillOverThresholdMemoryUsage;
+ }
- public Boolean getAllowDirtyRead() {
- return allowDirtyRead;
- }
+ public Long getSpillOverThresholdNumRows() {
+ return spillOverThresholdNumRows;
+ }
- public Boolean getAllowRetry() {
- return getOptions().allowRetry;
- }
+ public Boolean getStream() {
+ return stream;
+ }
- /**
- * @param allowRetry Set this option to true to make it possible to retry fetching the latest batch from a cursor.
- *
- * This makes possible to safely retry invoking {@link com.arangodb.ArangoCursor#next()} in
- * case of I/O exceptions (which are actually thrown as {@link com.arangodb.ArangoDBException}
- * with cause {@link java.io.IOException})
- *
- * If set to false (default), then it is not safe to retry invoking
- * {@link com.arangodb.ArangoCursor#next()} in case of I/O exceptions, since the request to
- * fetch the next batch is not idempotent (i.e. the cursor may advance multiple times on the
- * server).
- *
- * Note: once you successfully received the last batch, you should call
- * {@link com.arangodb.ArangoCursor#close()} so that the server does not unnecessary keep the
- * batch until the cursor times out ({@link AqlQueryOptions#ttl(Integer)}).
- * @return options
- * @since ArangoDB 3.11
- */
- public AqlQueryOptions allowRetry(final Boolean allowRetry) {
- getOptions().allowRetry = allowRetry;
- return this;
- }
+ public void setAllPlans(Boolean allPlans) {
+ this.allPlans = allPlans;
+ }
- @Override
- public AqlQueryOptions clone() {
- try {
- AqlQueryOptions clone = (AqlQueryOptions) super.clone();
- clone.bindVars = bindVars != null ? new HashMap<>(bindVars) : null;
- clone.options = options != null ? options.clone() : null;
- return clone;
- } catch (CloneNotSupportedException e) {
- throw new AssertionError();
+ public void setAllowDirtyReads(Boolean allowDirtyReads) {
+ this.allowDirtyReads = allowDirtyReads;
}
- }
- public static final class Options implements Cloneable {
+ public void setAllowRetry(Boolean allowRetry) {
+ this.allowRetry = allowRetry;
+ }
- private Boolean failOnWarning;
- private Boolean profile;
- private Long maxTransactionSize;
- private Long maxWarningCount;
- private Long intermediateCommitCount;
- private Long intermediateCommitSize;
- private Double satelliteSyncWait;
- private Boolean skipInaccessibleCollections;
- private Optimizer optimizer;
- private Boolean fullCount;
- private Integer maxPlans;
- private Boolean stream;
- private Collection shardIds;
- private Double maxRuntime;
- private Boolean fillBlockCache;
- private String forceOneShardAttributeValue;
- private Boolean allowRetry;
+ public void setFailOnWarning(Boolean failOnWarning) {
+ this.failOnWarning = failOnWarning;
+ }
- public Boolean getFailOnWarning() {
- return failOnWarning;
+ public void setFillBlockCache(Boolean fillBlockCache) {
+ this.fillBlockCache = fillBlockCache;
}
- public Boolean getProfile() {
- return profile;
+ public void setForceOneShardAttributeValue(String forceOneShardAttributeValue) {
+ this.forceOneShardAttributeValue = forceOneShardAttributeValue;
}
- public Long getMaxTransactionSize() {
- return maxTransactionSize;
+ public void setFullCount(Boolean fullCount) {
+ this.fullCount = fullCount;
}
- public Long getMaxWarningCount() {
- return maxWarningCount;
+ public void setIntermediateCommitCount(Long intermediateCommitCount) {
+ this.intermediateCommitCount = intermediateCommitCount;
}
- public Long getIntermediateCommitCount() {
- return intermediateCommitCount;
+ public void setIntermediateCommitSize(Long intermediateCommitSize) {
+ this.intermediateCommitSize = intermediateCommitSize;
}
- public Long getIntermediateCommitSize() {
- return intermediateCommitSize;
+ public void setMaxDNFConditionMembers(Integer maxDNFConditionMembers) {
+ this.maxDNFConditionMembers = maxDNFConditionMembers;
}
- public Double getSatelliteSyncWait() {
- return satelliteSyncWait;
+ public void setMaxNodesPerCallstack(Integer maxNodesPerCallstack) {
+ this.maxNodesPerCallstack = maxNodesPerCallstack;
}
- public Boolean getSkipInaccessibleCollections() {
- return skipInaccessibleCollections;
+ public void setMaxNumberOfPlans(Integer maxNumberOfPlans) {
+ this.maxNumberOfPlans = maxNumberOfPlans;
}
- public Boolean getFullCount() {
- return fullCount;
+ public void setMaxRuntime(Double maxRuntime) {
+ this.maxRuntime = maxRuntime;
}
- public Integer getMaxPlans() {
- return maxPlans;
+ public void setMaxTransactionSize(Long maxTransactionSize) {
+ this.maxTransactionSize = maxTransactionSize;
}
- public Boolean getStream() {
- return stream;
+ public void setMaxWarningCount(Long maxWarningCount) {
+ this.maxWarningCount = maxWarningCount;
}
- public Double getMaxRuntime() {
- return maxRuntime;
+ public void setOptimizer(Optimizer optimizer) {
+ this.optimizer = optimizer;
}
- public Boolean getFillBlockCache() {
- return fillBlockCache;
+ public void setProfile(Boolean profile) {
+ this.profile = profile;
}
- public String getForceOneShardAttributeValue() {
- return forceOneShardAttributeValue;
+ public void setSatelliteSyncWait(Double satelliteSyncWait) {
+ this.satelliteSyncWait = satelliteSyncWait;
}
- public Optimizer getOptimizer() {
- if (optimizer == null) {
- optimizer = new Optimizer();
- }
- return optimizer;
+ public void setShardIds(Collection shardIds) {
+ this.shardIds = shardIds;
}
- public Collection getShardIds() {
- return shardIds;
+ public void setSkipInaccessibleCollections(Boolean skipInaccessibleCollections) {
+ this.skipInaccessibleCollections = skipInaccessibleCollections;
}
- public Boolean getAllowRetry() {
- return allowRetry;
+ public void setSpillOverThresholdMemoryUsage(Long spillOverThresholdMemoryUsage) {
+ this.spillOverThresholdMemoryUsage = spillOverThresholdMemoryUsage;
+ }
+
+ public void setSpillOverThresholdNumRows(Long spillOverThresholdNumRows) {
+ this.spillOverThresholdNumRows = spillOverThresholdNumRows;
+ }
+
+ public void setStream(Boolean stream) {
+ this.stream = stream;
}
@Override
public Options clone() {
try {
Options clone = (Options) super.clone();
+ clone.customOptions = customOptions != null ? new HashMap<>(customOptions) : null;
clone.optimizer = optimizer != null ? optimizer.clone() : null;
clone.shardIds = shardIds != null ? new ArrayList<>(shardIds) : null;
return clone;
@@ -623,6 +467,10 @@ public Collection getRules() {
return rules;
}
+ public void setRules(Collection rules) {
+ this.rules = rules;
+ }
+
@Override
public Optimizer clone() {
try {
@@ -635,4 +483,522 @@ public Optimizer clone() {
}
}
+ // ------------------------------------
+ // --- accessors for nested options ---
+ // ------------------------------------
+
+ @JsonIgnore
+ public Map getCustomOptions() {
+ return getOptions().getCustomOptions();
+ }
+
+ /**
+ * Set an additional custom option in the form of key-value pair.
+ *
+ * @param key option name
+ * @param value option value
+ * @return this
+ */
+ public AqlQueryOptions customOption(String key, Object value) {
+ getOptions().setCustomOption(key, value);
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getAllowDirtyReads() {
+ return getOptions().getAllowDirtyReads();
+ }
+
+ /**
+ * @param allowDirtyReads If you set this option to true and execute the query against a cluster deployment, then
+ * the Coordinator is allowed to read from any shard replica and not only from the leader.
+ * You may observe data inconsistencies (dirty reads) when reading from followers, namely
+ * obsolete revisions of documents because changes have not yet been replicated to the
+ * follower, as well as changes to documents before they are officially committed on the
+ * leader. This feature is only available in the Enterprise Edition.
+ * @return this
+ */
+ public AqlQueryOptions allowDirtyReads(final Boolean allowDirtyReads) {
+ getOptions().setAllowDirtyReads(allowDirtyReads);
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getAllowRetry() {
+ return getOptions().getAllowRetry();
+ }
+
+ /**
+ * @param allowRetry Set this option to true to make it possible to retry fetching the latest batch from a cursor.
+ *
+ * This makes possible to safely retry invoking {@link com.arangodb.ArangoCursor#next()} in
+ * case of I/O exceptions (which are actually thrown as {@link com.arangodb.ArangoDBException}
+ * with cause {@link java.io.IOException})
+ *
+ * If set to false (default), then it is not safe to retry invoking
+ * {@link com.arangodb.ArangoCursor#next()} in case of I/O exceptions, since the request to
+ * fetch the next batch is not idempotent (i.e. the cursor may advance multiple times on the
+ * server).
+ *
+ * Note: once you successfully received the last batch, you should call
+ * {@link com.arangodb.ArangoCursor#close()} so that the server does not unnecessarily keep the
+ * batch until the cursor times out ({@link AqlQueryOptions#ttl(Integer)}).
+ * @return this
+ * @since ArangoDB 3.11
+ */
+ public AqlQueryOptions allowRetry(final Boolean allowRetry) {
+ getOptions().setAllowRetry(allowRetry);
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getFailOnWarning() {
+ return getOptions().getFailOnWarning();
+ }
+
+ /**
+ * @param failOnWarning When set to true, the query will throw an exception and abort instead of producing a
+ * warning. This option should be used during development to catch potential issues early.
+ * When the attribute is set to false, warnings will not be propagated to exceptions and will
+ * be returned with the query result. There is also a server configuration option
+ * --query.fail-on-warning for setting the default value for failOnWarning so it does not
+ * need to be set on a per-query level.
+ * @return this
+ */
+ public AqlQueryOptions failOnWarning(final Boolean failOnWarning) {
+ getOptions().setFailOnWarning(failOnWarning);
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getFillBlockCache() {
+ return getOptions().getFillBlockCache();
+ }
+
+ /**
+     * @param fillBlockCache if set to <code>true</code> or not specified, this will make the query store
+     * the data it reads via the RocksDB storage engine in the RocksDB block cache. This is
+     * usually the desired behavior. The option can be set to <code>false</code> for queries that
+     * are known to either read a lot of data that would thrash the block cache, or for queries
+     * that read data known to be outside of the hot set. By setting the option
+     * to <code>false</code>, data read by the query will not make it into the RocksDB block
+     * cache if it is not already in there, thus leaving more room for the actual hot set.
+ * @return this
+ * @since ArangoDB 3.8.1
+ */
+ public AqlQueryOptions fillBlockCache(final Boolean fillBlockCache) {
+ getOptions().setFillBlockCache(fillBlockCache);
+ return this;
+ }
+
+ @JsonIgnore
+ public String getForceOneShardAttributeValue() {
+ return getOptions().getForceOneShardAttributeValue();
+ }
+
+ /**
+ * @param forceOneShardAttributeValue This query option can be used in complex queries in case the query optimizer
+ * cannot automatically detect that the query can be limited to only a single
+ * server (e.g. in a disjoint smart graph case).
+ *
+ * If the option is set incorrectly, i.e. to a wrong shard key value, then the
+ * query may be shipped to a wrong DB server and may not return results (i.e.
+ * empty result set).
+ *
+ * Use at your own risk.
+ * @return this
+ */
+ public AqlQueryOptions forceOneShardAttributeValue(final String forceOneShardAttributeValue) {
+ getOptions().setForceOneShardAttributeValue(forceOneShardAttributeValue);
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getFullCount() {
+ return getOptions().getFullCount();
+ }
+
+ /**
+ * @param fullCount if set to true and the query contains a LIMIT clause, then the result will have an extra
+ * attribute
+ * with the sub-attributes stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 }
+ * } }. The
+ * fullCount attribute will contain the number of documents in the result before the last LIMIT
+ * in the
+ * query was applied. It can be used to count the number of documents that match certain filter
+ * criteria,
+ * but only return a subset of them, in one go. It is thus similar to MySQL's
+ * SQL_CALC_FOUND_ROWS hint.
+ * Note that setting the option will disable a few LIMIT optimizations and may lead to more
+ * documents
+ * being processed, and thus make queries run longer. Note that the fullCount attribute will
+ * only be
+ * present in the result if the query has a LIMIT clause and the LIMIT clause is actually used
+ * in the
+ * query.
+ * @return this
+ */
+ public AqlQueryOptions fullCount(final Boolean fullCount) {
+ getOptions().setFullCount(fullCount);
+ return this;
+ }
+
+ @JsonIgnore
+ public Long getIntermediateCommitCount() {
+ return getOptions().getIntermediateCommitCount();
+ }
+
+ /**
+ * @param intermediateCommitCount Maximum number of operations after which an intermediate commit is performed
+ * automatically. Honored by
+ * the RocksDB storage engine only.
+ * @return this
+ * @since ArangoDB 3.2.0
+ */
+ public AqlQueryOptions intermediateCommitCount(final Long intermediateCommitCount) {
+ getOptions().setIntermediateCommitCount(intermediateCommitCount);
+ return this;
+ }
+
+ @JsonIgnore
+ public Long getIntermediateCommitSize() {
+ return getOptions().getIntermediateCommitSize();
+ }
+
+ /**
+ * @param intermediateCommitSize Maximum total size of operations after which an intermediate commit is performed
+ * automatically.
+ * Honored by the RocksDB storage engine only.
+ * @return this
+ * @since ArangoDB 3.2.0
+ */
+ public AqlQueryOptions intermediateCommitSize(final Long intermediateCommitSize) {
+ getOptions().setIntermediateCommitSize(intermediateCommitSize);
+ return this;
+ }
+
+ @JsonIgnore
+ public Integer getMaxDNFConditionMembers() {
+ return getOptions().getMaxDNFConditionMembers();
+ }
+
+ /**
+ * @param maxDNFConditionMembers A threshold for the maximum number of OR sub-nodes in the internal representation
+ * of an AQL FILTER condition.
+ *
+ * You can use this option to limit the computation time and memory usage when
+ * converting complex AQL FILTER conditions into the internal DNF (disjunctive normal
+ * form) format. FILTER conditions with a lot of logical branches (AND, OR, NOT) can
+ * take a large amount of processing time and memory. This query option limits the
+ * computation time and memory usage for such conditions.
+ *
+ * Once the threshold value is reached during the DNF conversion of a FILTER
+ * condition, the conversion is aborted, and the query continues with a simplified
+ * internal representation of the condition, which cannot be used for index lookups.
+ *
+ * You can set the threshold globally instead of per query with the
+ * --query.max-dnf-condition-members startup option.
+ * @return this
+ */
+ public AqlQueryOptions maxDNFConditionMembers(final Integer maxDNFConditionMembers) {
+ getOptions().setMaxDNFConditionMembers(maxDNFConditionMembers);
+ return this;
+ }
+
+ @JsonIgnore
+ public Integer getMaxNodesPerCallstack() {
+ return getOptions().getMaxNodesPerCallstack();
+ }
+
+ /**
+ * @param maxNodesPerCallstack The number of execution nodes in the query plan after that stack splitting is
+ * performed to avoid a potential stack overflow. Defaults to the configured value of
+ * the startup option --query.max-nodes-per-callstack.
+ *
+ * This option is only useful for testing and debugging and normally does not need any
+ * adjustment.
+ * @return this
+ */
+ public AqlQueryOptions maxNodesPerCallstack(final Integer maxNodesPerCallstack) {
+ getOptions().setMaxNodesPerCallstack(maxNodesPerCallstack);
+ return this;
+ }
+
+ @JsonIgnore
+ public Integer getMaxNumberOfPlans() {
+ return getOptions().getMaxNumberOfPlans();
+ }
+
+ /**
+ * @param maxNumberOfPlans Limits the maximum number of plans that are created by the AQL query optimizer.
+ * @return this
+ */
+ public AqlQueryOptions maxNumberOfPlans(final Integer maxNumberOfPlans) {
+ getOptions().setMaxNumberOfPlans(maxNumberOfPlans);
+ return this;
+ }
+
+ /**
+ * @deprecated for removal, use {@link AqlQueryOptions#getMaxNumberOfPlans()} instead
+ */
+ @Deprecated
+ @JsonIgnore
+ public Integer getMaxPlans() {
+ return getMaxNumberOfPlans();
+ }
+
+ /**
+ * @param maxPlans Limits the maximum number of plans that are created by the AQL query optimizer.
+ * @return this
+ * @deprecated for removal, use {@link AqlQueryOptions#maxNumberOfPlans(Integer)} instead
+ */
+ @Deprecated
+ public AqlQueryOptions maxPlans(final Integer maxPlans) {
+ return maxNumberOfPlans(maxPlans);
+ }
+
+ @JsonIgnore
+ public Double getMaxRuntime() {
+ return getOptions().getMaxRuntime();
+ }
+
+ /**
+ * @param maxRuntime The query has to be executed within the given runtime or it will be killed. The value is specified
+ * in seconds. The default value is 0.0 (no timeout).
+ * @return this
+ */
+ public AqlQueryOptions maxRuntime(final Double maxRuntime) {
+ getOptions().setMaxRuntime(maxRuntime);
+ return this;
+ }
+
+ @JsonIgnore
+ public Long getMaxTransactionSize() {
+ return getOptions().getMaxTransactionSize();
+ }
+
+ /**
+ * @param maxTransactionSize Transaction size limit in bytes. Honored by the RocksDB storage engine only.
+ * @return this
+ * @since ArangoDB 3.2.0
+ */
+ public AqlQueryOptions maxTransactionSize(final Long maxTransactionSize) {
+ getOptions().setMaxTransactionSize(maxTransactionSize);
+ return this;
+ }
+
+ @JsonIgnore
+ public Long getMaxWarningCount() {
+ return getOptions().getMaxWarningCount();
+ }
+
+ /**
+ * @param maxWarningCount Limits the maximum number of warnings a query will return. The number of warnings a
+ * query will return
+ * is limited to 10 by default, but that number can be increased or decreased by setting
+ * this attribute.
+ * @return this
+ * @since ArangoDB 3.2.0
+ */
+ public AqlQueryOptions maxWarningCount(final Long maxWarningCount) {
+ getOptions().setMaxWarningCount(maxWarningCount);
+ return this;
+ }
+
+ @JsonIgnore
+ public Optimizer getOptimizer() {
+ return getOptions().getOptimizer();
+ }
+
+ /**
+ * @param optimizer Options related to the query optimizer.
+ * @return this
+ */
+ public AqlQueryOptions optimizer(final Optimizer optimizer) {
+ getOptions().setOptimizer(optimizer);
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getProfile() {
+ return getOptions().getProfile();
+ }
+
+ /**
+ * @param profile If set to true, then the additional query profiling information will be returned in the
+ * sub-attribute
+ * profile of the extra return attribute if the query result is not served from the query cache.
+ * @return this
+ */
+ public AqlQueryOptions profile(final Boolean profile) {
+ getOptions().setProfile(profile);
+ return this;
+ }
+
+ @JsonIgnore
+ public Double getSatelliteSyncWait() {
+ return getOptions().getSatelliteSyncWait();
+ }
+
+ /**
+ * @param satelliteSyncWait This enterprise parameter allows to configure how long a DBServer will have time to
+ * bring the
+ * satellite collections involved in the query into sync. The default value is 60.0
+ * (seconds). When the
+ * max time has been reached the query will be stopped.
+ * @return this
+ * @since ArangoDB 3.2.0
+ */
+ public AqlQueryOptions satelliteSyncWait(final Double satelliteSyncWait) {
+ getOptions().setSatelliteSyncWait(satelliteSyncWait);
+ return this;
+ }
+
+ @JsonIgnore
+ public Collection getShardIds() {
+ return getOptions().getShardIds();
+ }
+
+ /**
+ * Restrict query to shards by given ids. This is an internal option. Use at your own risk.
+ *
+ * @param shardIds
+ * @return this
+ */
+ public AqlQueryOptions shardIds(final String... shardIds) {
+ getOptions().setShardIds(Arrays.asList(shardIds));
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getSkipInaccessibleCollections() {
+ return getOptions().getSkipInaccessibleCollections();
+ }
+
+ /**
+ * @param skipInaccessibleCollections AQL queries (especially graph traversals) will treat collection to which a
+ * user has no access rights
+ * as if these collections were empty. Instead of returning a forbidden access
+ * error, your queries will
+ * execute normally. This is intended to help with certain use-cases: A graph
+ * contains several
+ * collections and different users execute AQL queries on that graph. You can
+ * now naturally limit the
+ * accessible results by changing the access rights of users on collections.
+ * This feature is only
+ * available in the Enterprise Edition.
+ * @return this
+ * @since ArangoDB 3.2.0
+ */
+ public AqlQueryOptions skipInaccessibleCollections(final Boolean skipInaccessibleCollections) {
+ getOptions().setSkipInaccessibleCollections(skipInaccessibleCollections);
+ return this;
+ }
+
+ @JsonIgnore
+ public Long getSpillOverThresholdMemoryUsage() {
+ return getOptions().getSpillOverThresholdMemoryUsage();
+ }
+
+ /**
+ * @param spillOverThresholdMemoryUsage This option allows queries to store intermediate and final results
+ * temporarily on disk if the amount of memory used (in bytes) exceeds the
+ * specified value. This is used for decreasing the memory usage during the
+ * query execution.
+ *
+ * This option only has an effect on queries that use the SORT operation but
+ * without a LIMIT, and if you enable the spillover feature by setting a path
+ * for the directory to store the temporary data in with the
+ * --temp.intermediate-results-path startup option.
+ *
+ * Default value: 128MB.
+ *
+ * Spilling data from RAM onto disk is an experimental feature and is turned
+ * off by default. The query results are still built up entirely in RAM on
+ * Coordinators and single servers for non-streaming queries. To avoid the
+ * buildup of the entire query result in RAM, use a streaming query (see the
+ * stream option).
+ * @return this
+ */
+ public AqlQueryOptions spillOverThresholdMemoryUsage(final Long spillOverThresholdMemoryUsage) {
+ getOptions().setSpillOverThresholdMemoryUsage(spillOverThresholdMemoryUsage);
+ return this;
+ }
+
+ @JsonIgnore
+ public Long getSpillOverThresholdNumRows() {
+ return getOptions().getSpillOverThresholdNumRows();
+ }
+
+ /**
+ * @param spillOverThresholdNumRows This option allows queries to store intermediate and final results temporarily
+ * on disk if the number of rows produced by the query exceeds the specified value.
+ * This is used for decreasing the memory usage during the query execution. In a
+ * query that iterates over a collection that contains documents, each row is a
+ * document, and in a query that iterates over temporary values
+ * (i.e. FOR i IN 1..100), each row is one of such temporary values.
+ *
+ * This option only has an effect on queries that use the SORT operation but
+ * without a LIMIT, and if you enable the spillover feature by setting a path for
+ * the directory to store the temporary data in with the
+ * --temp.intermediate-results-path startup option.
+ *
+ * Default value: 5000000 rows.
+ *
+ * Spilling data from RAM onto disk is an experimental feature and is turned off
+ * by default. The query results are still built up entirely in RAM on Coordinators
+ * and single servers for non-streaming queries. To avoid the buildup of the entire
+ * query result in RAM, use a streaming query (see the stream option).
+ * @return this
+ */
+ public AqlQueryOptions spillOverThresholdNumRows(final Long spillOverThresholdNumRows) {
+ getOptions().setSpillOverThresholdNumRows(spillOverThresholdNumRows);
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getStream() {
+ return getOptions().getStream();
+ }
+
+ /**
+ * @param stream Specify true and the query will be executed in a streaming fashion. The query result is not
+ * stored on
+ * the server, but calculated on the fly. Beware: long-running queries will need to hold the
+ * collection
+ * locks for as long as the query cursor exists. When set to false a query will be executed right
+ * away in
+ * its entirety. In that case query results are either returned right away (if the resultset is small
+ * enough), or stored on the arangod instance and accessible via the cursor API (with respect to the
+ * ttl). It is advisable to only use this option on short-running queries or without exclusive locks
+ * (write-locks on MMFiles). Please note that the query options cache, count and fullCount will not
+ * work
+ * on streaming queries. Additionally query statistics, warnings and profiling data will only be
+ * available after the query is finished. The default value is false
+ * @return this
+ * @since ArangoDB 3.4.0
+ */
+ public AqlQueryOptions stream(final Boolean stream) {
+ getOptions().setStream(stream);
+ return this;
+ }
+
+ @JsonIgnore
+ public Collection getRules() {
+ return getOptions().getOptimizer().getRules();
+ }
+
+ /**
+ * @param rules A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute,
+ * telling the
+ * optimizer to include or exclude specific rules. To disable a rule, prefix its name with a -, to
+ * enable
+ * a rule, prefix it with a +. There is also a pseudo-rule all, which will match all optimizer rules
+ * @return this
+ */
+ public AqlQueryOptions rules(final Collection rules) {
+ getOptions().getOptimizer().setRules(rules);
+ return this;
+ }
+
}
diff --git a/core/src/main/java/com/arangodb/model/ExplainAqlQueryOptions.java b/core/src/main/java/com/arangodb/model/ExplainAqlQueryOptions.java
new file mode 100644
index 000000000..4d4d2f511
--- /dev/null
+++ b/core/src/main/java/com/arangodb/model/ExplainAqlQueryOptions.java
@@ -0,0 +1,597 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2016 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.model;
+
+import com.arangodb.internal.serde.UserDataInside;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Map;
+
+/**
+ * @author Michele Rastelli
+ */
+public final class ExplainAqlQueryOptions {
+
+ private Map bindVars;
+ private String query;
+ private AqlQueryOptions.Options options;
+
+ public ExplainAqlQueryOptions() {
+ super();
+ }
+
+ @UserDataInside
+ public Map getBindVars() {
+ return bindVars;
+ }
+
+ /**
+ * @param bindVars key/value pairs representing the bind parameters
+ * @return options
+ */
+ ExplainAqlQueryOptions bindVars(final Map bindVars) {
+ this.bindVars = bindVars;
+ return this;
+ }
+
+ public String getQuery() {
+ return query;
+ }
+
+ /**
+ * @param query the query which you want explained
+ * @return options
+ */
+ ExplainAqlQueryOptions query(final String query) {
+ this.query = query;
+ return this;
+ }
+
+ public AqlQueryOptions.Options getOptions() {
+ if (options == null) {
+ options = new AqlQueryOptions.Options();
+ }
+ return options;
+ }
+
+ public ExplainAqlQueryOptions options(final AqlQueryOptions.Options options) {
+ this.options = options;
+ return this;
+ }
+
+
+ // ------------------------------------
+ // --- accessors for nested options ---
+ // ------------------------------------
+
+ @JsonIgnore
+ public Map getCustomOptions() {
+ return getOptions().getCustomOptions();
+ }
+
+ /**
+ * Set an additional custom option in the form of key-value pair.
+ *
+ * @param key option name
+ * @param value option value
+ * @return this
+ */
+ public ExplainAqlQueryOptions customOption(String key, Object value) {
+ getOptions().setCustomOption(key, value);
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getAllPlans() {
+ return getOptions().getAllPlans();
+ }
+
+ /**
+ * @param value if set to true, all possible execution plans will be returned. The default is false, meaning only
+ * the optimal plan will be returned.
+ * @return this
+ */
+ public ExplainAqlQueryOptions allPlans(final Boolean value) {
+ getOptions().setAllPlans(value);
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getAllowDirtyReads() {
+ return getOptions().getAllowDirtyReads();
+ }
+
+ /**
+ * @param allowDirtyReads If you set this option to true and execute the query against a cluster deployment, then
+ * the Coordinator is allowed to read from any shard replica and not only from the leader.
+ * You may observe data inconsistencies (dirty reads) when reading from followers, namely
+ * obsolete revisions of documents because changes have not yet been replicated to the
+ * follower, as well as changes to documents before they are officially committed on the
+ * leader. This feature is only available in the Enterprise Edition.
+ * @return this
+ */
+ public ExplainAqlQueryOptions allowDirtyReads(final Boolean allowDirtyReads) {
+ getOptions().setAllowDirtyReads(allowDirtyReads);
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getAllowRetry() {
+ return getOptions().getAllowRetry();
+ }
+
+ /**
+ * @param allowRetry Set this option to true to make it possible to retry fetching the latest batch from a cursor.
+ *
+ * This makes possible to safely retry invoking {@link com.arangodb.ArangoCursor#next()} in
+ * case of I/O exceptions (which are actually thrown as {@link com.arangodb.ArangoDBException}
+ * with cause {@link java.io.IOException})
+ *
+ * If set to false (default), then it is not safe to retry invoking
+ * {@link com.arangodb.ArangoCursor#next()} in case of I/O exceptions, since the request to
+ * fetch the next batch is not idempotent (i.e. the cursor may advance multiple times on the
+ * server).
+ *
+ * Note: once you successfully received the last batch, you should call
+ * {@link com.arangodb.ArangoCursor#close()} so that the server does not unnecessarily keep the
+ * batch until the cursor times out ({@link AqlQueryOptions#ttl(Integer)}).
+ * @return this
+ * @since ArangoDB 3.11
+ */
+ public ExplainAqlQueryOptions allowRetry(final Boolean allowRetry) {
+ getOptions().setAllowRetry(allowRetry);
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getFailOnWarning() {
+ return getOptions().getFailOnWarning();
+ }
+
+ /**
+ * @param failOnWarning When set to true, the query will throw an exception and abort instead of producing a
+ * warning. This option should be used during development to catch potential issues early.
+ * When the attribute is set to false, warnings will not be propagated to exceptions and will
+ * be returned with the query result. There is also a server configuration option
+ * --query.fail-on-warning for setting the default value for failOnWarning so it does not
+ * need to be set on a per-query level.
+ * @return this
+ */
+ public ExplainAqlQueryOptions failOnWarning(final Boolean failOnWarning) {
+ getOptions().setFailOnWarning(failOnWarning);
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getFillBlockCache() {
+ return getOptions().getFillBlockCache();
+ }
+
+ /**
+     * @param fillBlockCache if set to <code>true</code> or not specified, this will make the query store
+     * the data it reads via the RocksDB storage engine in the RocksDB block cache. This is
+     * usually the desired behavior. The option can be set to <code>false</code> for queries that
+     * are known to either read a lot of data that would thrash the block cache, or for queries
+     * that read data known to be outside of the hot set. By setting the option
+     * to <code>false</code>, data read by the query will not make it into the RocksDB block
+     * cache if it is not already in there, thus leaving more room for the actual hot set.
+ * @return this
+ * @since ArangoDB 3.8.1
+ */
+ public ExplainAqlQueryOptions fillBlockCache(final Boolean fillBlockCache) {
+ getOptions().setFillBlockCache(fillBlockCache);
+ return this;
+ }
+
+ @JsonIgnore
+ public String getForceOneShardAttributeValue() {
+ return getOptions().getForceOneShardAttributeValue();
+ }
+
+ /**
+ * @param forceOneShardAttributeValue This query option can be used in complex queries in case the query optimizer
+ * cannot automatically detect that the query can be limited to only a single
+ * server (e.g. in a disjoint smart graph case).
+ *
+ * If the option is set incorrectly, i.e. to a wrong shard key value, then the
+ * query may be shipped to a wrong DB server and may not return results (i.e.
+ * empty result set).
+ *
+ * Use at your own risk.
+ * @return this
+ */
+ public ExplainAqlQueryOptions forceOneShardAttributeValue(final String forceOneShardAttributeValue) {
+ getOptions().setForceOneShardAttributeValue(forceOneShardAttributeValue);
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getFullCount() {
+ return getOptions().getFullCount();
+ }
+
+ /**
+ * @param fullCount if set to true and the query contains a LIMIT clause, then the result will have an extra
+ * attribute
+ * with the sub-attributes stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 }
+ * } }. The
+ * fullCount attribute will contain the number of documents in the result before the last LIMIT
+ * in the
+ * query was applied. It can be used to count the number of documents that match certain filter
+ * criteria,
+ * but only return a subset of them, in one go. It is thus similar to MySQL's
+ * SQL_CALC_FOUND_ROWS hint.
+ * Note that setting the option will disable a few LIMIT optimizations and may lead to more
+ * documents
+ * being processed, and thus make queries run longer. Note that the fullCount attribute will
+ * only be
+ * present in the result if the query has a LIMIT clause and the LIMIT clause is actually used
+ * in the
+ * query.
+ * @return this
+ */
+ public ExplainAqlQueryOptions fullCount(final Boolean fullCount) {
+ getOptions().setFullCount(fullCount);
+ return this;
+ }
+
+ @JsonIgnore
+ public Long getIntermediateCommitCount() {
+ return getOptions().getIntermediateCommitCount();
+ }
+
+ /**
+ * @param intermediateCommitCount Maximum number of operations after which an intermediate commit is performed
+ * automatically. Honored by
+ * the RocksDB storage engine only.
+ * @return this
+ * @since ArangoDB 3.2.0
+ */
+ public ExplainAqlQueryOptions intermediateCommitCount(final Long intermediateCommitCount) {
+ getOptions().setIntermediateCommitCount(intermediateCommitCount);
+ return this;
+ }
+
+ @JsonIgnore
+ public Long getIntermediateCommitSize() {
+ return getOptions().getIntermediateCommitSize();
+ }
+
+ /**
+ * @param intermediateCommitSize Maximum total size of operations after which an intermediate commit is performed
+ * automatically.
+ * Honored by the RocksDB storage engine only.
+ * @return this
+ * @since ArangoDB 3.2.0
+ */
+ public ExplainAqlQueryOptions intermediateCommitSize(final Long intermediateCommitSize) {
+ getOptions().setIntermediateCommitSize(intermediateCommitSize);
+ return this;
+ }
+
+ @JsonIgnore
+ public Integer getMaxDNFConditionMembers() {
+ return getOptions().getMaxDNFConditionMembers();
+ }
+
+ /**
+ * @param maxDNFConditionMembers A threshold for the maximum number of OR sub-nodes in the internal representation
+ * of an AQL FILTER condition.
+ *
+ * You can use this option to limit the computation time and memory usage when
+ * converting complex AQL FILTER conditions into the internal DNF (disjunctive normal
+ * form) format. FILTER conditions with a lot of logical branches (AND, OR, NOT) can
+ * take a large amount of processing time and memory. This query option limits the
+ * computation time and memory usage for such conditions.
+ *
+ * Once the threshold value is reached during the DNF conversion of a FILTER
+ * condition, the conversion is aborted, and the query continues with a simplified
+ * internal representation of the condition, which cannot be used for index lookups.
+ *
+ * You can set the threshold globally instead of per query with the
+ * --query.max-dnf-condition-members startup option.
+ * @return this
+ */
+ public ExplainAqlQueryOptions maxDNFConditionMembers(final Integer maxDNFConditionMembers) {
+ getOptions().setMaxDNFConditionMembers(maxDNFConditionMembers);
+ return this;
+ }
+
+ @JsonIgnore
+ public Integer getMaxNodesPerCallstack() {
+ return getOptions().getMaxNodesPerCallstack();
+ }
+
+ /**
+ * @param maxNodesPerCallstack The number of execution nodes in the query plan after that stack splitting is
+ * performed to avoid a potential stack overflow. Defaults to the configured value of
+ * the startup option --query.max-nodes-per-callstack.
+ *
+ * This option is only useful for testing and debugging and normally does not need any
+ * adjustment.
+ * @return this
+ */
+ public ExplainAqlQueryOptions maxNodesPerCallstack(final Integer maxNodesPerCallstack) {
+ getOptions().setMaxNodesPerCallstack(maxNodesPerCallstack);
+ return this;
+ }
+
+ @JsonIgnore
+ public Integer getMaxNumberOfPlans() {
+ return getOptions().getMaxNumberOfPlans();
+ }
+
+ /**
+ * @param maxNumberOfPlans Limits the maximum number of plans that are created by the AQL query optimizer.
+ * @return this
+ */
+ public ExplainAqlQueryOptions maxNumberOfPlans(final Integer maxNumberOfPlans) {
+ getOptions().setMaxNumberOfPlans(maxNumberOfPlans);
+ return this;
+ }
+
+ @JsonIgnore
+ public Double getMaxRuntime() {
+ return getOptions().getMaxRuntime();
+ }
+
+ /**
+ * @param maxRuntime The query has to be executed within the given runtime or it will be killed. The value is specified
+ * in seconds. The default value is 0.0 (no timeout).
+ * @return this
+ */
+ public ExplainAqlQueryOptions maxRuntime(final Double maxRuntime) {
+ getOptions().setMaxRuntime(maxRuntime);
+ return this;
+ }
+
+ @JsonIgnore
+ public Long getMaxTransactionSize() {
+ return getOptions().getMaxTransactionSize();
+ }
+
+ /**
+ * @param maxTransactionSize Transaction size limit in bytes. Honored by the RocksDB storage engine only.
+ * @return this
+ * @since ArangoDB 3.2.0
+ */
+ public ExplainAqlQueryOptions maxTransactionSize(final Long maxTransactionSize) {
+ getOptions().setMaxTransactionSize(maxTransactionSize);
+ return this;
+ }
+
+ @JsonIgnore
+ public Long getMaxWarningCount() {
+ return getOptions().getMaxWarningCount();
+ }
+
+ /**
+ * @param maxWarningCount Limits the maximum number of warnings a query will return. The number of warnings a
+ * query will return
+ * is limited to 10 by default, but that number can be increased or decreased by setting
+ * this attribute.
+ * @return this
+ * @since ArangoDB 3.2.0
+ */
+ public ExplainAqlQueryOptions maxWarningCount(final Long maxWarningCount) {
+ getOptions().setMaxWarningCount(maxWarningCount);
+ return this;
+ }
+
+ @JsonIgnore
+ public AqlQueryOptions.Optimizer getOptimizer() {
+ return getOptions().getOptimizer();
+ }
+
+ /**
+ * @param optimizer Options related to the query optimizer.
+ * @return this
+ */
+ public ExplainAqlQueryOptions optimizer(final AqlQueryOptions.Optimizer optimizer) {
+ getOptions().setOptimizer(optimizer);
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getProfile() {
+ return getOptions().getProfile();
+ }
+
+ /**
+ * @param profile If set to true, then the additional query profiling information will be returned in the
+ * sub-attribute
+ * profile of the extra return attribute if the query result is not served from the query cache.
+ * @return this
+ */
+ public ExplainAqlQueryOptions profile(final Boolean profile) {
+ getOptions().setProfile(profile);
+ return this;
+ }
+
+ @JsonIgnore
+ public Double getSatelliteSyncWait() {
+ return getOptions().getSatelliteSyncWait();
+ }
+
+ /**
+ * @param satelliteSyncWait This enterprise parameter allows to configure how long a DBServer will have time to
+ * bring the
+ * satellite collections involved in the query into sync. The default value is 60.0
+ * (seconds). When the
+ * max time has been reached the query will be stopped.
+ * @return this
+ * @since ArangoDB 3.2.0
+ */
+ public ExplainAqlQueryOptions satelliteSyncWait(final Double satelliteSyncWait) {
+ getOptions().setSatelliteSyncWait(satelliteSyncWait);
+ return this;
+ }
+
+ @JsonIgnore
+ public Collection<String> getShardIds() {
+ return getOptions().getShardIds();
+ }
+
+ /**
+ * Restrict query to shards by given ids. This is an internal option. Use at your own risk.
+ *
+ * @param shardIds the ids of the shards the query should be restricted to
+ * @return this
+ */
+ public ExplainAqlQueryOptions shardIds(final String... shardIds) {
+ getOptions().setShardIds(Arrays.asList(shardIds));
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getSkipInaccessibleCollections() {
+ return getOptions().getSkipInaccessibleCollections();
+ }
+
+ /**
+ * @param skipInaccessibleCollections AQL queries (especially graph traversals) will treat collections to which a
+ * user has no access rights
+ * as if these collections were empty. Instead of returning a forbidden access
+ * error, your queries will
+ * execute normally. This is intended to help with certain use-cases: A graph
+ * contains several
+ * collections and different users execute AQL queries on that graph. You can
+ * now naturally limit the
+ * accessible results by changing the access rights of users on collections.
+ * This feature is only
+ * available in the Enterprise Edition.
+ * @return this
+ * @since ArangoDB 3.2.0
+ */
+ public ExplainAqlQueryOptions skipInaccessibleCollections(final Boolean skipInaccessibleCollections) {
+ getOptions().setSkipInaccessibleCollections(skipInaccessibleCollections);
+ return this;
+ }
+
+ @JsonIgnore
+ public Long getSpillOverThresholdMemoryUsage() {
+ return getOptions().getSpillOverThresholdMemoryUsage();
+ }
+
+ /**
+ * @param spillOverThresholdMemoryUsage This option allows queries to store intermediate and final results
+ * temporarily on disk if the amount of memory used (in bytes) exceeds the
+ * specified value. This is used for decreasing the memory usage during the
+ * query execution.
+ *
+ * This option only has an effect on queries that use the SORT operation but
+ * without a LIMIT, and if you enable the spillover feature by setting a path
+ * for the directory to store the temporary data in with the
+ * --temp.intermediate-results-path startup option.
+ *
+ * Default value: 128MB.
+ *
+ * Spilling data from RAM onto disk is an experimental feature and is turned
+ * off by default. The query results are still built up entirely in RAM on
+ * Coordinators and single servers for non-streaming queries. To avoid the
+ * buildup of the entire query result in RAM, use a streaming query (see the
+ * stream option).
+ * @return this
+ */
+ public ExplainAqlQueryOptions spillOverThresholdMemoryUsage(final Long spillOverThresholdMemoryUsage) {
+ getOptions().setSpillOverThresholdMemoryUsage(spillOverThresholdMemoryUsage);
+ return this;
+ }
+
+ @JsonIgnore
+ public Long getSpillOverThresholdNumRows() {
+ return getOptions().getSpillOverThresholdNumRows();
+ }
+
+ /**
+ * @param spillOverThresholdNumRows This option allows queries to store intermediate and final results temporarily
+ * on disk if the number of rows produced by the query exceeds the specified value.
+ * This is used for decreasing the memory usage during the query execution. In a
+ * query that iterates over a collection that contains documents, each row is a
+ * document, and in a query that iterates over temporary values
+ * (i.e. FOR i IN 1..100), each row is one of such temporary values.
+ *
+ * This option only has an effect on queries that use the SORT operation but
+ * without a LIMIT, and if you enable the spillover feature by setting a path for
+ * the directory to store the temporary data in with the
+ * --temp.intermediate-results-path startup option.
+ *
+ * Default value: 5000000 rows.
+ *
+ * Spilling data from RAM onto disk is an experimental feature and is turned off
+ * by default. The query results are still built up entirely in RAM on Coordinators
+ * and single servers for non-streaming queries. To avoid the buildup of the entire
+ * query result in RAM, use a streaming query (see the stream option).
+ * @return this
+ */
+ public ExplainAqlQueryOptions spillOverThresholdNumRows(final Long spillOverThresholdNumRows) {
+ getOptions().setSpillOverThresholdNumRows(spillOverThresholdNumRows);
+ return this;
+ }
+
+ @JsonIgnore
+ public Boolean getStream() {
+ return getOptions().getStream();
+ }
+
+ /**
+ * @param stream Specify true and the query will be executed in a streaming fashion. The query result is not
+ * stored on
+ * the server, but calculated on the fly. Beware: long-running queries will need to hold the
+ * collection
+ * locks for as long as the query cursor exists. When set to false a query will be executed right
+ * away in
+ * its entirety. In that case query results are either returned right away (if the resultset is small
+ * enough), or stored on the arangod instance and accessible via the cursor API (with respect to the
+ * ttl). It is advisable to only use this option on short-running queries or without exclusive locks
+ * (write-locks on MMFiles). Please note that the query options cache, count and fullCount will not
+ * work
+ * on streaming queries. Additionally query statistics, warnings and profiling data will only be
+ * available after the query is finished. The default value is false
+ * @return this
+ * @since ArangoDB 3.4.0
+ */
+ public ExplainAqlQueryOptions stream(final Boolean stream) {
+ getOptions().setStream(stream);
+ return this;
+ }
+
+ @JsonIgnore
+ public Collection<String> getRules() {
+ return getOptions().getOptimizer().getRules();
+ }
+
+ /**
+ * @param rules A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute,
+ * telling the
+ * optimizer to include or exclude specific rules. To disable a rule, prefix its name with a -, to
+ * enable
+ * a rule, prefix it with a +. There is also a pseudo-rule all, which will match all optimizer rules
+ * @return this
+ */
+ public ExplainAqlQueryOptions rules(final Collection<String> rules) {
+ getOptions().getOptimizer().setRules(rules);
+ return this;
+ }
+
+}
diff --git a/core/src/main/java/com/arangodb/model/OptionsBuilder.java b/core/src/main/java/com/arangodb/model/OptionsBuilder.java
index 11ea5b0ab..050c0d98f 100644
--- a/core/src/main/java/com/arangodb/model/OptionsBuilder.java
+++ b/core/src/main/java/com/arangodb/model/OptionsBuilder.java
@@ -82,6 +82,13 @@ public static AqlQueryExplainOptions build(
return options.query(query).bindVars(bindVars);
}
+ public static ExplainAqlQueryOptions build(
+ final ExplainAqlQueryOptions options,
+ final String query,
+ final Map<String, Object> bindVars) {
+ return options.query(query).bindVars(bindVars);
+ }
+
public static AqlQueryParseOptions build(final AqlQueryParseOptions options, final String query) {
return options.query(query);
}
diff --git a/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config.json b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config.json
index aeeb8de38..94919ac94 100644
--- a/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config.json
+++ b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config.json
@@ -1541,6 +1541,12 @@
"allDeclaredMethods": true,
"allDeclaredConstructors": true
},
+ {
+ "name": "com.arangodb.model.ExplainAqlQueryOptions",
+ "allDeclaredFields": true,
+ "allDeclaredMethods": true,
+ "allDeclaredConstructors": true
+ },
{
"name": "com.arangodb.model.QueueTimeSample",
"allDeclaredFields": true,
diff --git a/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config.json b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config.json
index aeeb8de38..94919ac94 100644
--- a/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config.json
+++ b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config.json
@@ -1541,6 +1541,12 @@
"allDeclaredMethods": true,
"allDeclaredConstructors": true
},
+ {
+ "name": "com.arangodb.model.ExplainAqlQueryOptions",
+ "allDeclaredFields": true,
+ "allDeclaredMethods": true,
+ "allDeclaredConstructors": true
+ },
{
"name": "com.arangodb.model.QueueTimeSample",
"allDeclaredFields": true,
diff --git a/test-functional/src/test/java/com/arangodb/ArangoDatabaseAsyncTest.java b/test-functional/src/test/java/com/arangodb/ArangoDatabaseAsyncTest.java
index 3bf3cdf7a..cc8aab2ad 100644
--- a/test-functional/src/test/java/com/arangodb/ArangoDatabaseAsyncTest.java
+++ b/test-functional/src/test/java/com/arangodb/ArangoDatabaseAsyncTest.java
@@ -1209,7 +1209,7 @@ void explainAqlQuery(ArangoDatabaseAsync db) throws ExecutionException, Interrup
AqlQueryExplainEntity explain = db.explainAqlQuery(
getExplainQuery(db),
Collections.singletonMap("myId", "123"),
- new AqlQueryExplainOptions()).get();
+ new ExplainAqlQueryOptions()).get();
assertThat(explain).isNotNull();
checkUntypedExecutionPlan(explain.getPlan());
@@ -1238,7 +1238,36 @@ void explainAqlQueryAllPlans(ArangoDatabaseAsync db) throws ExecutionException,
AqlQueryExplainEntity explain = db.explainAqlQuery(
getExplainQuery(db),
Collections.singletonMap("myId", "123"),
- new AqlQueryExplainOptions().allPlans(true)).get();
+ new ExplainAqlQueryOptions().allPlans(true)).get();
+ assertThat(explain).isNotNull();
+
+ assertThat(explain.getPlan()).isNull();
+ assertThat(explain.getPlans()).allSatisfy(this::checkUntypedExecutionPlan);
+ assertThat(explain.getWarnings()).isNotEmpty();
+
+ CursorWarning warning = explain.getWarnings().iterator().next();
+ assertThat(warning).isNotNull();
+ assertThat(warning.getCode()).isEqualTo(1562);
+ assertThat(warning.getMessage()).contains("division by zero");
+
+ assertThat(explain.getStats()).isNotNull();
+
+ assertThat(explain.getStats().get("executionTime"))
+ .isInstanceOf(Double.class)
+ .asInstanceOf(DOUBLE)
+ .isNotNull()
+ .isPositive();
+
+ assertThat(explain.getCacheable()).isNull();
+ }
+
+ @ParameterizedTest
+ @MethodSource("asyncDbs")
+ void explainAqlQueryAllPlansCustomOption(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException {
+ AqlQueryExplainEntity explain = db.explainAqlQuery(
+ getExplainQuery(db),
+ Collections.singletonMap("myId", "123"),
+ new ExplainAqlQueryOptions().customOption("allPlans", true)).get();
assertThat(explain).isNotNull();
assertThat(explain.getPlan()).isNull();
diff --git a/test-functional/src/test/java/com/arangodb/ArangoDatabaseTest.java b/test-functional/src/test/java/com/arangodb/ArangoDatabaseTest.java
index 2761f8859..0cfd5b63c 100644
--- a/test-functional/src/test/java/com/arangodb/ArangoDatabaseTest.java
+++ b/test-functional/src/test/java/com/arangodb/ArangoDatabaseTest.java
@@ -643,6 +643,25 @@ void queryWithLimitAndFullCount(ArangoDatabase db) {
assertThat((cursor.getStats().getFullCount())).isGreaterThanOrEqualTo(10);
}
+ @ParameterizedTest
+ @MethodSource("dbs")
+ void queryWithLimitAndFullCountAsCustomOption(ArangoDatabase db) {
+ for (int i = 0; i < 10; i++) {
+ db.collection(CNAME1).insertDocument(new BaseDocument(), null);
+ }
+
+ final ArangoCursor<String> cursor = db
+ .query("for i in " + CNAME1 + " Limit 5 return i._id", String.class, new AqlQueryOptions()
+ .customOption("fullCount", true));
+ assertThat((Object) cursor).isNotNull();
+ for (int i = 0; i < 5; i++, cursor.next()) {
+ assertThat((Iterator<?>) cursor).hasNext();
+ }
+ assertThat(cursor.getStats()).isNotNull();
+ assertThat(cursor.getStats().getExecutionTime()).isPositive();
+ assertThat((cursor.getStats().getFullCount())).isGreaterThanOrEqualTo(10);
+ }
+
@ParameterizedTest
@MethodSource("dbs")
void queryStats(ArangoDatabase db) {
@@ -1278,7 +1297,7 @@ void explainAqlQuery(ArangoDatabase db) {
AqlQueryExplainEntity explain = db.explainAqlQuery(
getExplainQuery(db),
Collections.singletonMap("myId", "123"),
- new AqlQueryExplainOptions());
+ new ExplainAqlQueryOptions());
assertThat(explain).isNotNull();
checkUntypedExecutionPlan(explain.getPlan());
@@ -1307,7 +1326,36 @@ void explainAqlQueryAllPlans(ArangoDatabase db) {
AqlQueryExplainEntity explain = db.explainAqlQuery(
getExplainQuery(db),
Collections.singletonMap("myId", "123"),
- new AqlQueryExplainOptions().allPlans(true));
+ new ExplainAqlQueryOptions().allPlans(true));
+ assertThat(explain).isNotNull();
+
+ assertThat(explain.getPlan()).isNull();
+ assertThat(explain.getPlans()).allSatisfy(this::checkUntypedExecutionPlan);
+ assertThat(explain.getWarnings()).isNotEmpty();
+
+ CursorWarning warning = explain.getWarnings().iterator().next();
+ assertThat(warning).isNotNull();
+ assertThat(warning.getCode()).isEqualTo(1562);
+ assertThat(warning.getMessage()).contains("division by zero");
+
+ assertThat(explain.getStats()).isNotNull();
+
+ assertThat(explain.getStats().get("executionTime"))
+ .isInstanceOf(Double.class)
+ .asInstanceOf(DOUBLE)
+ .isNotNull()
+ .isPositive();
+
+ assertThat(explain.getCacheable()).isNull();
+ }
+
+ @ParameterizedTest
+ @MethodSource("dbs")
+ void explainAqlQueryAllPlansCustomOption(ArangoDatabase db) {
+ AqlQueryExplainEntity explain = db.explainAqlQuery(
+ getExplainQuery(db),
+ Collections.singletonMap("myId", "123"),
+ new ExplainAqlQueryOptions().customOption("allPlans", true));
assertThat(explain).isNotNull();
assertThat(explain.getPlan()).isNull();