Skip to content

SQL: test coverage for JdbcResultSet #32813

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 11 commits into from
Aug 31, 2018
Merged
Original file line number Diff line number Diff line change
Expand Up @@ -133,72 +133,37 @@ public String getString(int columnIndex) throws SQLException {

@Override
public boolean getBoolean(int columnIndex) throws SQLException {
    // Delegate to getObject(int, Class) so every type conversion (and its
    // error reporting) is centralized in one place instead of a per-getter
    // try/ClassCastException block. A SQL NULL maps to false, as required
    // by the JDBC ResultSet contract.
    return column(columnIndex) != null ? getObject(columnIndex, Boolean.class) : false;
}

@Override
public byte getByte(int columnIndex) throws SQLException {
    // Route through getObject(int, Class) so conversion and error reporting
    // live in one shared path; SQL NULL maps to 0 per the JDBC spec.
    return column(columnIndex) != null ? getObject(columnIndex, Byte.class) : 0;
}

@Override
public short getShort(int columnIndex) throws SQLException {
    // Route through getObject(int, Class) so conversion and error reporting
    // live in one shared path; SQL NULL maps to 0 per the JDBC spec.
    return column(columnIndex) != null ? getObject(columnIndex, Short.class) : 0;
}

@Override
public int getInt(int columnIndex) throws SQLException {
    // Route through getObject(int, Class) so conversion and error reporting
    // live in one shared path; SQL NULL maps to 0 per the JDBC spec.
    return column(columnIndex) != null ? getObject(columnIndex, Integer.class) : 0;
}

@Override
public long getLong(int columnIndex) throws SQLException {
    // Route through getObject(int, Class) so conversion and error reporting
    // live in one shared path; SQL NULL maps to 0 per the JDBC spec.
    return column(columnIndex) != null ? getObject(columnIndex, Long.class) : 0;
}

@Override
public float getFloat(int columnIndex) throws SQLException {
    // Route through getObject(int, Class) so conversion and error reporting
    // live in one shared path; SQL NULL maps to 0 per the JDBC spec.
    return column(columnIndex) != null ? getObject(columnIndex, Float.class) : 0;
}

@Override
public double getDouble(int columnIndex) throws SQLException {
    // Route through getObject(int, Class) so conversion and error reporting
    // live in one shared path; SQL NULL maps to 0 per the JDBC spec.
    return column(columnIndex) != null ? getObject(columnIndex, Double.class) : 0;
}

@Override
Expand Down Expand Up @@ -272,12 +237,24 @@ public byte[] getBytes(String columnLabel) throws SQLException {

@Override
public Date getDate(String columnLabel) throws SQLException {
// Label-based variant: presumably column(columnLabel) resolves the label to
// its positional index, which is then fed to the index-based getDate —
// confirm against the column(String) helper.
// TODO: the error message in case the value in the column cannot be converted to a Date refers to a column index
// (for example - "unable to convert column 4 to a long") and not to the column name, which is a bit confusing.
// Should we reconsider this? Maybe by catching the exception here and rethrowing it with the columnLabel instead.
return getDate(column(columnLabel));
}

private Long dateTime(int columnIndex) throws SQLException {
Object val = column(columnIndex);
try {
// TODO: the B6 appendix of the jdbc spec does mention CHAR, VARCHAR, LONGVARCHAR, DATE, TIMESTAMP as supported
// jdbc types that should be handled by getDate and getTime methods. From all of those we support VARCHAR and
// TIMESTAMP. Should we consider the VARCHAR conversion as a later enhancement?
if (JDBCType.TIMESTAMP.equals(cursor.columns().get(columnIndex - 1).type)) {
// the cursor can return an Integer if the date-since-epoch is small enough, XContentParser (Jackson) will
// return the "smallest" data type for numbers when parsing
// TODO: this should probably be handled server side
return val == null ? null : ((Number) val).longValue();
};
return val == null ? null : (Long) val;
} catch (ClassCastException cce) {
throw new SQLException("unable to convert column " + columnIndex + " to a long", cce);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ private TypeConverter() {

}

private static final long DAY_IN_MILLIS = 60 * 60 * 24;
private static final long DAY_IN_MILLIS = 60 * 60 * 24 * 1000;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yikes

private static final Map<Class<?>, JDBCType> javaToJDBC;

static {
Expand Down Expand Up @@ -143,7 +143,10 @@ static <T> T convert(Object val, JDBCType columnType, Class<T> type) throws SQLE
return (T) convert(val, columnType);
}

if (type.isInstance(val)) {
// converting a Long to a Timestamp shouldn't be possible according to the spec,
// it feels a little brittle to check this scenario here and I don't particularly like it
// TODO: can we do any better or should we go over the spec and allow getLong(date) to be valid?
if (!(type == Long.class && columnType == JDBCType.TIMESTAMP) && type.isInstance(val)) {
try {
return type.cast(val);
} catch (ClassCastException cce) {
Expand Down Expand Up @@ -336,6 +339,8 @@ private static Boolean asBoolean(Object val, JDBCType columnType) throws SQLExce
case FLOAT:
case DOUBLE:
return Boolean.valueOf(Integer.signum(((Number) val).intValue()) != 0);
case VARCHAR:
return Boolean.valueOf((String) val);
default:
throw new SQLException("Conversion from type [" + columnType + "] to [Boolean] not supported");

Expand All @@ -355,6 +360,12 @@ private static Byte asByte(Object val, JDBCType columnType) throws SQLException
case FLOAT:
case DOUBLE:
return safeToByte(safeToLong(((Number) val).doubleValue()));
case VARCHAR:
try {
return Byte.valueOf((String) val);
} catch (NumberFormatException e) {
throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] to a Byte", val), e);
}
default:
}

Expand All @@ -374,6 +385,12 @@ private static Short asShort(Object val, JDBCType columnType) throws SQLExceptio
case FLOAT:
case DOUBLE:
return safeToShort(safeToLong(((Number) val).doubleValue()));
case VARCHAR:
try {
return Short.valueOf((String) val);
} catch (NumberFormatException e) {
throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] to a Short", val), e);
}
default:
}

Expand All @@ -393,6 +410,12 @@ private static Integer asInteger(Object val, JDBCType columnType) throws SQLExce
case FLOAT:
case DOUBLE:
return safeToInt(safeToLong(((Number) val).doubleValue()));
case VARCHAR:
try {
return Integer.valueOf((String) val);
} catch (NumberFormatException e) {
throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] to an Integer", val), e);
}
default:
}

Expand All @@ -412,8 +435,17 @@ private static Long asLong(Object val, JDBCType columnType) throws SQLException
case FLOAT:
case DOUBLE:
return safeToLong(((Number) val).doubleValue());
case TIMESTAMP:
return ((Number) val).longValue();
//TODO: should we support conversion to TIMESTAMP?
//The spec says that getLong() should support the following types conversions:
//TINYINT, SMALLINT, INTEGER, BIGINT, REAL, FLOAT, DOUBLE, DECIMAL, NUMERIC, BIT, BOOLEAN, CHAR, VARCHAR, LONGVARCHAR
//case TIMESTAMP:
// return ((Number) val).longValue();
case VARCHAR:
try {
return Long.valueOf((String) val);
} catch (NumberFormatException e) {
throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] to a Long", val), e);
}
default:
}

Expand All @@ -433,6 +465,12 @@ private static Float asFloat(Object val, JDBCType columnType) throws SQLExceptio
case FLOAT:
case DOUBLE:
return new Float(((Number) val).doubleValue());
case VARCHAR:
try {
return Float.valueOf((String) val);
} catch (NumberFormatException e) {
throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] to a Float", val), e);
}
default:
}

Expand All @@ -452,6 +490,12 @@ private static Double asDouble(Object val, JDBCType columnType) throws SQLExcept
case FLOAT:
case DOUBLE:
return new Double(((Number) val).doubleValue());
case VARCHAR:
try {
return Double.valueOf((String) val);
} catch (NumberFormatException e) {
throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] to a Double", val), e);
}
default:
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ public void testThrownExceptionsWhenSettingStringValues() throws SQLException {
JdbcPreparedStatement jps = createJdbcPreparedStatement();

SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, "foo bar", Types.INTEGER));
assertEquals("Conversion from type [VARCHAR] to [Integer] not supported", sqle.getMessage());
assertEquals("Unable to convert value [foo bar] to an Integer", sqle.getMessage());
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We should probably be consistent in the messages. Half of the messages say "Unable to convert" and the other half say "Conversion from type". I understand that in one case we bail out earlier by just looking at the type, while in the other case we actually try to parse the string. But I wonder if it would make sense to use the same kind of message and specify both the value and the type in both cases.

}

public void testSettingByteTypeValues() throws SQLException {
Expand Down Expand Up @@ -550,7 +550,8 @@ public void testThrownExceptionsWhenSettingByteArrayValues() throws SQLException
}

private long randomMillisSinceEpoch() {
return randomLongBetween(0, System.currentTimeMillis());
// random between Jan 1st, 1970 and Jan 1st, 2050
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Mostly just curious: Why this end bound? Also, should we also be testing epochMillis that are before 1970-01-01 as well since they are perfectly valid?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I saw this one before in ResultSetBaseTestCase. Maybe it's time to move it to ESTestCase along with the other random*(...) methods. I agree with @colings86 — it would be nice to get negative timestamps from time to time for testing. But I am +1 on a constant upper bound, since it will make the failure reproducible in the (possibly very rare) case it somehow fails because of a particular timestamp.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@colings86 @imotov since the suggestion is to move this to ESTestCase base class (and be part of the test infra in ES), any good ideas for the interval start date?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm not sure why randomLong() is not good enough here. All long values are valid timestamps since the epoch, so should we not just use that?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

👍 That sounds like a great idea. As long as we don't pass that to H2 we should be fine.

return ESTestCase.randomLongBetween(0, 2524608000000L);
}

private JdbcPreparedStatement createJdbcPreparedStatement() throws SQLException {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.qa.sql.nosecurity;

import org.elasticsearch.xpack.qa.sql.jdbc.ResultSetTestCase;

/**
 * Runs the shared {@link ResultSetTestCase} suite against the no-security QA
 * cluster. The body is intentionally empty: every test is inherited, and this
 * subclass exists only so the same suite is executed in this cluster
 * configuration as well.
 */
public class JdbcResultSetIT extends ResultSetTestCase {
}
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could you add a JavaDoc here that just describes why nothing needs to be added to this class, at first glance it looks quite strange to extends a test class and not add any test or override any methods. I'm sure there is a reason but its not obvious on first looking why doing this is right

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, that confuses people. The reasons for this is that we want to run the same tests in two or three very different integration clusters. We should probably update all tests like this with comments explaining it.

Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/

package org.elasticsearch.xpack.qa.sql.security;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.qa.sql.jdbc.ResultSetTestCase;

import java.util.Properties;

public class JdbcResultSetIT extends ResultSetTestCase {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is enough different for this to be worth testing with security?

@Override
protected Settings restClientSettings() {
// Reuse the shared security (authentication/SSL) settings used by the other
// SQL QA REST tests so this suite can talk to the secured cluster.
return RestSqlIT.securitySettings();
}

@Override
protected String getProtocol() {
    // Use TLS when the QA cluster was started with SSL enabled.
    if (RestSqlIT.SSL_ENABLED) {
        return "https";
    }
    return "http";
}

@Override
protected Properties connectionProperties() {
    // Augment the base connection settings with the admin-user credentials so
    // the JDBC driver can authenticate against the secured QA cluster.
    Properties props = super.connectionProperties();
    props.putAll(JdbcSecurityIT.adminProperties());
    return props;
}
}
Loading