Skip to content

Instantly share code, notes, and snippets.

@davidecavestro
Created August 20, 2013 16:27
Show Gist options
  • Save davidecavestro/6283789 to your computer and use it in GitHub Desktop.
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringReader;
import java.sql.Array;
import java.sql.Blob;
import java.sql.CallableStatement;
import java.sql.ClientInfoStatus;
import java.sql.Clob;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.NClob;
import java.sql.PreparedStatement;
import java.sql.SQLClientInfoException;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Savepoint;
import java.sql.Statement;
import java.sql.Struct;
import java.sql.Timestamp;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentLinkedQueue;
import javax.sql.DataSource;
import org.h2.jdbcx.JdbcDataSource;
/**
* A standalone test case trying to reproduce an issue about <a
* href="http://h2-database.66688.n3.nabble.com/h2-Continuous-Increase-in-H2-db-size-after-dropping-
* and-loading-same-data-repeatedly-td4026836.html">continuous increase in H2 db size</a>.
*
* @author Davide Cavestro
*/
public class H2IncreasingSizeTest {
// Defaults used when the corresponding command-line argument is absent.
private static final int DEFAULT_ITERATIONS = 5;
private static final int DEFAULT_TABLES = 20;
private static final int DEFAULT_RECORDS = 5008;
private static final int DEFAULT_BATCH_SIZE = 10;
// Timestamp format for log lines.
// NOTE(review): SimpleDateFormat is not thread-safe; this test is single-threaded, so sharing it is OK here.
private final static DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
/**
 * Entry point. Optional positional arguments override the defaults:
 * args[0] = iterations, args[1] = tables, args[2] = rows per table, args[3] = batch size.
 *
 * @param args optional positional overrides; may be null or shorter than 4
 */
public static void main(final String[] args) {
    final int runs = intArg(args, 0, DEFAULT_ITERATIONS);
    final int tables = intArg(args, 1, DEFAULT_TABLES);
    final int rows = intArg(args, 2, DEFAULT_RECORDS);
    final int batchSize = intArg(args, 3, DEFAULT_BATCH_SIZE);
    try {
        new H2IncreasingSizeTest().run(runs, tables, rows, batchSize);
    } catch (final IOException e) {
        e.printStackTrace();
    } catch (final SQLException e) {
        e.printStackTrace();
    }
}

/**
 * Returns {@code args[idx]} parsed as an int, or {@code defaultValue} when the
 * argument array is null or too short. Factored out of the four copy-pasted
 * if/else blocks the original main used.
 *
 * @throws NumberFormatException if the argument is present but not a valid int
 */
private static int intArg(final String[] args, final int idx, final int defaultValue) {
    // parseInt avoids the needless boxing of Integer.valueOf
    return (args != null && args.length > idx) ? Integer.parseInt(args[idx]) : defaultValue;
}
/**
 * Runs the reproduction loop: for each iteration, drops every db object and then
 * re-imports the same generated data, so the on-disk db size can be observed
 * across repeated drop/load cycles.
 *
 * @param iterations number of drop/import cycles to run
 * @param tables number of tables created per cycle
 * @param rows number of rows inserted into each table
 * @param batchSize number of rows per JDBC batch (each batch is committed)
 * @throws IOException if the db directory path cannot be resolved
 * @throws SQLException on any database failure
 */
private void run(final int iterations, final int tables, final int rows, final int batchSize) throws IOException,
SQLException {
// db files live under the system temp dir; deliberately kept across runs (cleanup calls are commented out)
final File dbDir = new File(System.getProperty("java.io.tmpdir"), "h2sizetest");
// deleteRecursively(dbDir);
dbDir.mkdirs();
// dbDir.deleteOnExit();
final String url =
"jdbc:h2:" + dbDir.getCanonicalPath()
+ "/data;MODE=PostgreSQL;CACHE_SIZE=65536;FILE_LOCK=SOCKET;LOG=1;MAX_LOG_SIZE=1024";
final NaivePoolingDataSource dataSource = new NaivePoolingDataSource();
dataSource.setURL(url);
dataSource.setUser("sa");
dataSource.setPassword("sa");
logMsg("Running " + iterations + " iterations with " + tables + " tables, " + rows + " rows and " + batchSize
+ " batch size");
for (int i = 0; i < iterations; i++) {
// a fresh connection per iteration; the pool hands back the previous one when available
final Connection connection = dataSource.getConnection();
connection.setAutoCommit(false);
try {
logMsg("Running iteration " + (i + 1));
dropContents(connection);
importData(connection, tables, rows, batchSize);
// logMsg("db size after " + (i + 1) + " iterations "
// + (new File(dbDir, "data.h2.db").length() / (1024 * 1024)) + "Kb");
logMsg("Iteration " + (i + 1) + " completed");
} finally {
// returns the connection to the pool (see PoolableConnection.close)
connection.close();
}
}
}
/**
 * Deletes a file, or a directory together with everything beneath it.
 * Deletion failures are silently ignored (File.delete return value unchecked),
 * matching best-effort cleanup semantics.
 *
 * @param toDelete the file or directory to remove
 */
private void deleteRecursively(File toDelete) {
    final File[] children = toDelete.listFiles();
    if (children != null) { // null for plain files and unreadable dirs
        for (final File child : children) {
            deleteRecursively(child);
        }
    }
    toDelete.delete();
}
/**
 * Drops every object in the database (tables, sequences, indexes, ...) via
 * H2's {@code DROP ALL OBJECTS} statement.
 *
 * @param connection the connection to issue the DDL on
 * @throws SQLException if the drop fails
 */
private void dropContents(final Connection connection) throws SQLException {
    logMsg("Dropping db contents"); // fixed typo: was "Droppping"
    final Statement stmt = connection.createStatement();
    try {
        stmt.executeUpdate("DROP ALL OBJECTS");
    } finally {
        stmt.close();
    }
}
/** Writes a timestamped message to stdout. */
private void logMsg(final String msg) {
    final String timestamp = dateFormat.format(new Date());
    System.out.println(timestamp + " " + msg);
}
/**
 * Generates the requested tables and fills each one with fake rows, committing
 * a batch every {@code batchSize} rows (plus a final commit for leftovers).
 *
 * @param connection the connection to work on (autoCommit is expected off)
 * @param tables number of tables to create
 * @param rows number of rows to insert per table
 * @param batchSize number of rows accumulated before each executeBatch/commit
 * @throws SQLException on any database failure
 */
private void importData(final Connection connection, final int tables, final int rows, final int batchSize)
throws SQLException {
    // phase 1: create all tables up front
    for (int t = 0; t < tables; t++) {
        final Statement ddl = connection.createStatement();
        try {
            ddl.executeUpdate("CREATE TABLE mytable" + t
                + " (the_pk integer, the_name varchar(4000), the_ts timestamp)");
        } finally {
            ddl.close();
        }
    }
    // phase 2: populate each table with batched inserts
    for (int t = 0; t < tables; t++) {
        final PreparedStatement insert =
            connection.prepareStatement("INSERT INTO mytable" + t
                + " (the_pk, the_name, the_ts) VALUES (?,?,?)");
        try {
            int pending = 0; // rows added since the last commit
            for (int row = 0; row < rows; row++) {
                insert.setInt(1, row);
                final String name =
                    "A veeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeery long string generated only to insert enough data to reproduce the increasing db size PITA "
                        + row;
                // a character stream is used instead of setString(2, name) on purpose
                insert.setCharacterStream(2, new StringReader(name), name.length());
                insert.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
                insert.addBatch();
                pending++;
                if (pending == batchSize) { // every batchSize rows
                    commitBatches(connection, insert);
                    pending = 0;
                }
            }
            if (pending > 0) { // flush the partial final batch
                commitBatches(connection, insert);
            }
        } finally {
            insert.close();
        }
    }
}
/**
 * Executes cumulated batches and commits.
 *
 * @param connection the connection the commit is issued on (autoCommit off)
 * @param pstmt the prepared statement whose pending batches are executed
 * @throws SQLException if the batch execution or the commit fails
 */
private void commitBatches(final Connection connection, final PreparedStatement pstmt) throws SQLException {
pstmt.executeBatch();
connection.commit();
}
// A minimal, single-purpose connection "pool" wrapping an H2 JdbcDataSource.
// Connections are parked in `pool` when a PoolableConnection is closed and
// handed out again on the next getConnection call.
private static class NaivePoolingDataSource implements DataSource {
// NOTE(review): access is additionally guarded by synchronized(pool), so the
// concurrent queue's own thread-safety is redundant here.
private final ConcurrentLinkedQueue<Connection> pool = new ConcurrentLinkedQueue<Connection>();
final JdbcDataSource delegateDataSource = new JdbcDataSource();
// --- plain delegation to the underlying H2 data source ---
@Override
public PrintWriter getLogWriter() throws SQLException {
return delegateDataSource.getLogWriter();
}
// Configuration setters forwarded to the delegate (not part of DataSource).
public void setPassword(String password) {
delegateDataSource.setPassword(password);
}
public void setUser(String user) {
delegateDataSource.setUser(user);
}
public void setURL(String url) {
delegateDataSource.setURL(url);
}
@Override
public void setLogWriter(PrintWriter out) throws SQLException {
delegateDataSource.setLogWriter(out);
}
@Override
public void setLoginTimeout(int seconds) throws SQLException {
delegateDataSource.setLoginTimeout(seconds);
}
@Override
public int getLoginTimeout() throws SQLException {
return delegateDataSource.getLoginTimeout();
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
return delegateDataSource.unwrap(iface);
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return delegateDataSource.isWrapperFor(iface);
}
/**
 * Hands out a pooled connection when available, otherwise opens a new one.
 * The result is always wrapped in a {@link PoolableConnection} so that
 * closing it returns the physical connection to the pool.
 */
@Override
public Connection getConnection() throws SQLException {
    synchronized (pool) {
        if (!pool.isEmpty()) {
            // BUGFIX: the pool stores RAW delegate connections (see returnConnection),
            // so they must be re-wrapped here. The original returned pool.poll()
            // directly, meaning the second user got the physical connection and its
            // close() really closed it — the connection was never pooled again.
            return new PoolableConnection(pool.poll());
        } else {
            return new PoolableConnection(delegateDataSource.getConnection());
        }
    }
}
/**
 * Same as {@link #getConnection()} but opens new connections with explicit
 * credentials. Pooled connections are reused regardless of the credentials
 * they were originally opened with (naive-pool limitation).
 */
@Override
public Connection getConnection(String username, String password) throws SQLException {
    synchronized (pool) {
        if (!pool.isEmpty()) {
            // BUGFIX: re-wrap the raw pooled connection (see getConnection()); the
            // original handed out the unwrapped delegate, whose close() really
            // closed it instead of returning it to the pool.
            return new PoolableConnection(pool.poll());
        } else {
            return new PoolableConnection(delegateDataSource.getConnection(username, password));
        }
    }
}
// Connection wrapper whose close() parks the underlying physical connection in
// the enclosing data source's pool instead of closing it. After close(), every
// other call fails via checkOpen(). All other methods are one-line delegation.
class PoolableConnection implements Connection {
private final Connection delegateConnection;
// flips to true on close(); the wrapper is single-use
private boolean closed = false;
public PoolableConnection(final Connection delegateConnection) {
this.delegateConnection = delegateConnection;
}
// Exposes the wrapped physical connection (escape hatch; bypasses the closed check).
public Connection getDelegateConnection() {
return delegateConnection;
}
// Guard used by every delegated method: fail fast once the wrapper was closed,
// because the underlying connection may already be in use by another caller.
protected void checkOpen() throws SQLException {
if (isClosed())
throw new SQLException("connection already closed (i.e.: already returned to the data source)");
}
@Override
public void clearWarnings() throws SQLException {
checkOpen();
delegateConnection.clearWarnings();
}
// Returns the underlying connection to the pool rather than closing it.
// NOTE(review): connection state (autoCommit, isolation, warnings) is NOT
// reset before pooling — the next user inherits it; verify this is intended.
@Override
public void close() throws SQLException {
if (!isClosed()) {
returnConnection(delegateConnection);
closed = true;
}
}
@Override
public void commit() throws SQLException {
checkOpen();
delegateConnection.commit();
}
@Override
public Statement createStatement() throws SQLException {
checkOpen();
return delegateConnection.createStatement();
}
@Override
public Statement createStatement(final int resultSetType, final int resultSetConcurrency,
final int resultSetHoldability) throws SQLException {
checkOpen();
return delegateConnection.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability);
}
@Override
public Statement createStatement(final int resultSetType, final int resultSetConcurrency)
throws SQLException {
checkOpen();
return delegateConnection.createStatement(resultSetType, resultSetConcurrency);
}
@Override
public boolean getAutoCommit() throws SQLException {
checkOpen();
return delegateConnection.getAutoCommit();
}
@Override
public String getCatalog() throws SQLException {
checkOpen();
return delegateConnection.getCatalog();
}
@Override
public int getHoldability() throws SQLException {
checkOpen();
return delegateConnection.getHoldability();
}
@Override
public DatabaseMetaData getMetaData() throws SQLException {
checkOpen();
return delegateConnection.getMetaData();
}
@Override
public int getTransactionIsolation() throws SQLException {
checkOpen();
return delegateConnection.getTransactionIsolation();
}
@Override
public Map<String, Class<?>> getTypeMap() throws SQLException {
checkOpen();
return delegateConnection.getTypeMap();
}
@Override
public SQLWarning getWarnings() throws SQLException {
checkOpen();
return delegateConnection.getWarnings();
}
// Reports the wrapper's state only; the physical connection may still be open.
@Override
public boolean isClosed() {
return closed;
}
@Override
public boolean isReadOnly() throws SQLException {
checkOpen();
return delegateConnection.isReadOnly();
}
@Override
public String nativeSQL(final String sql) throws SQLException {
checkOpen();
return delegateConnection.nativeSQL(sql);
}
@Override
public CallableStatement prepareCall(final String sql, final int resultSetType,
final int resultSetConcurrency, final int resultSetHoldability) throws SQLException {
checkOpen();
return delegateConnection.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability);
}
@Override
public CallableStatement prepareCall(final String sql, final int resultSetType,
final int resultSetConcurrency) throws SQLException {
checkOpen();
return delegateConnection.prepareCall(sql, resultSetType, resultSetConcurrency);
}
@Override
public CallableStatement prepareCall(final String sql) throws SQLException {
checkOpen();
return delegateConnection.prepareCall(sql);
}
@Override
public PreparedStatement prepareStatement(final String sql, final int resultSetType,
final int resultSetConcurrency, final int resultSetHoldability) throws SQLException {
checkOpen();
return delegateConnection.prepareStatement(sql, resultSetType, resultSetConcurrency,
resultSetHoldability);
}
@Override
public PreparedStatement prepareStatement(final String sql, final int resultSetType,
final int resultSetConcurrency) throws SQLException {
checkOpen();
return delegateConnection.prepareStatement(sql, resultSetType, resultSetConcurrency);
}
@Override
public PreparedStatement prepareStatement(final String sql, final int autoGeneratedKeys)
throws SQLException {
checkOpen();
return delegateConnection.prepareStatement(sql, autoGeneratedKeys);
}
@Override
public PreparedStatement prepareStatement(final String sql, final int[] columnIndexes) throws SQLException {
checkOpen();
return delegateConnection.prepareStatement(sql, columnIndexes);
}
@Override
public PreparedStatement prepareStatement(final String sql, final String[] columnNames) throws SQLException {
checkOpen();
return delegateConnection.prepareStatement(sql, columnNames);
}
@Override
public PreparedStatement prepareStatement(final String sql) throws SQLException {
checkOpen();
return delegateConnection.prepareStatement(sql);
}
@Override
public void releaseSavepoint(final Savepoint savepoint) throws SQLException {
checkOpen();
delegateConnection.releaseSavepoint(savepoint);
}
@Override
public void rollback() throws SQLException {
checkOpen();
delegateConnection.rollback();
}
@Override
public void rollback(final Savepoint savepoint) throws SQLException {
checkOpen();
delegateConnection.rollback(savepoint);
}
@Override
public void setAutoCommit(final boolean autoCommit) throws SQLException {
checkOpen();
delegateConnection.setAutoCommit(autoCommit);
}
@Override
public void setCatalog(final String catalog) throws SQLException {
checkOpen();
delegateConnection.setCatalog(catalog);
}
@Override
public void setHoldability(final int holdability) throws SQLException {
checkOpen();
delegateConnection.setHoldability(holdability);
}
@Override
public void setReadOnly(final boolean readOnly) throws SQLException {
checkOpen();
delegateConnection.setReadOnly(readOnly);
}
@Override
public Savepoint setSavepoint() throws SQLException {
checkOpen();
return delegateConnection.setSavepoint();
}
@Override
public Savepoint setSavepoint(final String name) throws SQLException {
checkOpen();
return delegateConnection.setSavepoint(name);
}
@Override
public void setTransactionIsolation(final int level) throws SQLException {
checkOpen();
delegateConnection.setTransactionIsolation(level);
}
@Override
public void setTypeMap(final Map<String, Class<?>> map) throws SQLException {
checkOpen();
delegateConnection.setTypeMap(map);
}
@Override
public Array createArrayOf(final String typeName, final Object[] elements) throws SQLException {
checkOpen();
return delegateConnection.createArrayOf(typeName, elements);
}
@Override
public Properties getClientInfo() throws SQLException {
checkOpen();
return delegateConnection.getClientInfo();
}
// setClientInfo may only throw SQLClientInfoException (per the Connection
// contract), hence the inline closed-check with per-property failure map
// instead of checkOpen().
@Override
public void setClientInfo(final Properties properties) throws SQLClientInfoException {
if (isClosed()) {
final Map<String, ClientInfoStatus> failures = new HashMap<String, ClientInfoStatus>();
for (final String propertyName : properties.stringPropertyNames())
failures.put(propertyName, ClientInfoStatus.REASON_VALUE_INVALID);
throw new SQLClientInfoException("connection is closed", failures);
}
delegateConnection.setClientInfo(properties);
}
@Override
public String getClientInfo(final String name) throws SQLException {
checkOpen();
return delegateConnection.getClientInfo(name);
}
@Override
public void setClientInfo(final String name, final String value) throws SQLClientInfoException {
if (isClosed()) {
final Map<String, ClientInfoStatus> failures = new HashMap<String, ClientInfoStatus>();
failures.put(name, ClientInfoStatus.REASON_VALUE_INVALID);
throw new SQLClientInfoException("connection is closed", failures);
}
delegateConnection.setClientInfo(name, value);
}
@Override
public Blob createBlob() throws SQLException {
checkOpen();
return delegateConnection.createBlob();
}
@Override
public Clob createClob() throws SQLException {
checkOpen();
return delegateConnection.createClob();
}
@Override
public NClob createNClob() throws SQLException {
checkOpen();
return delegateConnection.createNClob();
}
@Override
public SQLXML createSQLXML() throws SQLException {
checkOpen();
return delegateConnection.createSQLXML();
}
@Override
public Struct createStruct(final String typeName, final Object[] attributes) throws SQLException {
checkOpen();
return delegateConnection.createStruct(typeName, attributes);
}
// No checkOpen() here: validity is delegated even after the wrapper is closed.
@Override
public boolean isValid(final int timeout) throws SQLException {
return delegateConnection.isValid(timeout);
}
@Override
public boolean isWrapperFor(final Class<?> iface) throws SQLException {
return delegateConnection.isWrapperFor(iface);
}
@Override
public <T> T unwrap(final Class<T> iface) throws SQLException {
return delegateConnection.unwrap(iface);
}
}
/** Parks a raw (delegate) connection back in the pool for later reuse. */
public void returnConnection(Connection delegateConnection) {
synchronized (pool) {
// NOTE(review): connection state (autoCommit, tx isolation) is not reset
// before reuse; the next borrower inherits whatever the last user set.
pool.offer(delegateConnection);
}
}
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment