
Commit 035c7ff9 authored by Ate Douma

REPO-1811 further refactoring and cleanup of the new (Db)LockManager

- construction moved to LocalHippoRepository and further generalized
- LockManagerFactory moved to DbLockManagerFactory, which is now only responsible for constructing a DbLockManager
- DbLockManager: added support for schemaObjectPrefix and schemaCheckEnabled, like all JR schema definitions
- Added (handling for) the new AlreadyLockedException, indicating that a lock retry might be feasible
- Merged DbHelper logic into DbLockManager (which itself now gets passed into the background Runnables) and now uses the JR ConnectionHelper to check whether the table already exists
- Replaced ConfigurationLockManager JR Locking with new LockManager
- Added LockManagerUtilsTest, in test module, for services-api provided LockManagerUtils
parent 6354174b
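
The refactoring above replaces JCR-based locking with the services-api LockManager, which the diff below acquires via LockManagerUtils.waitForLock. As a rough, non-authoritative sketch of the intended caller pattern, using only calls visible in this diff (HippoServiceRegistry.getService, LockManagerUtils.waitForLock, LockManager#unlock) and a hypothetical lock key and guarded task:

import org.onehippo.cms7.services.HippoServiceRegistry;
import org.onehippo.cms7.services.lock.LockManager;
import org.onehippo.cms7.services.lock.LockManagerUtils;

public class LockRetrySketch {

    public void runExclusively() throws Exception {
        // the LockManager is registered by LocalHippoRepository (see the diff below)
        final LockManager lockManager = HippoServiceRegistry.getService(LockManager.class);
        // keep retrying every 500 ms while the key is already locked elsewhere,
        // mirroring how ConfigurationLockManager uses waitForLock below
        LockManagerUtils.waitForLock(lockManager, "examples:my-lock-key", 500);
        try {
            // ... work that must run on only one thread / cluster node at a time ...
        } finally {
            lockManager.unlock("examples:my-lock-key");
        }
    }
}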
......@@ -17,19 +17,11 @@
package org.apache.jackrabbit.core.journal;
import org.apache.jackrabbit.core.util.db.ConnectionHelper;
import org.onehippo.repository.journal.JournalConnectionHelperAccessor;
public class JournalConnectionHelperAccessorImpl implements JournalConnectionHelperAccessor {
public class JournalConnectionHelperAccessorImpl {
private ConnectionHelper connectionHelper;
public JournalConnectionHelperAccessorImpl(final DatabaseJournal journal) {
if (journal != null) {
connectionHelper = journal.conHelper;
}
}
public ConnectionHelper getConnectionHelper() {
return connectionHelper;
public static ConnectionHelper getConnectionHelper(final DatabaseJournal journal)
{
return journal.conHelper;
}
}
......@@ -22,7 +22,6 @@ import javax.sql.DataSource;
public class ConnectionHelperDataSourceAccessor {
private ConnectionHelperDataSourceAccessor(){
}
public static DataSource getDataSource(final ConnectionHelper connectionHelper) {
......
......@@ -28,8 +28,12 @@ import javax.jcr.Session;
import javax.jcr.SimpleCredentials;
import org.apache.commons.lang.StringUtils;
import org.apache.jackrabbit.core.cluster.ClusterNode;
import org.apache.jackrabbit.core.config.RepositoryConfig;
import org.apache.jackrabbit.core.fs.FileSystem;
import org.apache.jackrabbit.core.journal.DatabaseJournal;
import org.apache.jackrabbit.core.journal.JournalConnectionHelperAccessorImpl;
import org.apache.jackrabbit.core.util.db.ConnectionHelper;
import org.hippoecm.repository.jackrabbit.HippoNodeTypeRegistry;
import org.hippoecm.repository.nodetypes.NodeTypesChangeTracker;
import org.onehippo.cm.ConfigurationService;
......@@ -45,9 +49,10 @@ import org.hippoecm.repository.impl.ReferenceWorkspaceImpl;
import org.hippoecm.repository.jackrabbit.RepositoryImpl;
import org.hippoecm.repository.security.HippoSecurityManager;
import org.hippoecm.repository.util.RepoUtils;
import org.onehippo.repository.lock.memory.MemoryLockManager;
import org.onehippo.repository.modules.ModuleManager;
import org.onehippo.repository.lock.InternalLockManager;
import org.onehippo.repository.lock.LockManagerFactory;
import org.onehippo.repository.lock.db.DbLockManagerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
......@@ -83,7 +88,7 @@ public class LocalHippoRepository extends HippoRepositoryImpl {
private String repoConfig;
private ConfigurationServiceImpl configurationService;
private InternalLockManager lockManager;
protected InternalLockManager lockManager;
private ModuleManager moduleManager;
......@@ -196,7 +201,7 @@ public class LocalHippoRepository extends HippoRepositoryImpl {
* @return InputStream to the repository config
* @throws RepositoryException
*/
private InputStream getRepositoryConfigAsStream() throws RepositoryException {
protected InputStream getRepositoryConfigAsStream() throws RepositoryException {
String configPath = repoConfig;
......@@ -252,14 +257,31 @@ public class LocalHippoRepository extends HippoRepositoryImpl {
}
protected void initializeLocalRepository(final RepositoryConfig repConfig) throws RepositoryException {
jackrabbitRepository = new LocalRepositoryImpl(repConfig);
}
protected void initializeLockManager() throws RepositoryException {
ClusterNode clusterNode = jackrabbitRepository.getClusterNode();
if (clusterNode != null && clusterNode.getJournal() instanceof DatabaseJournal) {
DatabaseJournal dbJournal = (DatabaseJournal) clusterNode.getJournal();
ConnectionHelper connectionHelper =
JournalConnectionHelperAccessorImpl.getConnectionHelper((DatabaseJournal) clusterNode.getJournal());
this.lockManager = DbLockManagerFactory.create(connectionHelper, dbJournal.getSchemaObjectPrefix(),
dbJournal.isSchemaCheckEnabled(), jackrabbitRepository.getDescriptor("jackrabbit.cluster.id"));
} else {
this.lockManager = new MemoryLockManager();
}
}
protected void initialize() throws RepositoryException {
log.info("Initializing Hippo Repository");
Modules.setModules(new Modules(Thread.currentThread().getContextClassLoader()));
jackrabbitRepository = new LocalRepositoryImpl(createRepositoryConfig());
initializeLocalRepository(createRepositoryConfig());
initializeLockManager();
lockManager = new LockManagerFactory(jackrabbitRepository).create();
HippoServiceRegistry.registerService(lockManager, new Class[]{LockManager.class, InternalLockManager.class});
repository = new DecoratorFactoryImpl().getRepositoryDecorator(jackrabbitRepository);
......@@ -276,7 +298,7 @@ public class LocalHippoRepository extends HippoRepositoryImpl {
}
protected ConfigurationServiceImpl initializeConfiguration(final Session rootSession) throws RepositoryException {
log.info("Initializing LocalHippoRepository");
log.info("LocalHippoRepository initialize configuration");
final SimpleCredentials credentials = new SimpleCredentials("system", new char[]{});
final Session configurationServiceSession = DecoratorFactoryImpl.getSessionDecorator(rootSession.impersonate(credentials), credentials);
migrateToV12IfNeeded(configurationServiceSession, false);
......@@ -311,14 +333,14 @@ public class LocalHippoRepository extends HippoRepositoryImpl {
nodeTypesChangeTracker.stop();
nodeTypesChangeTracker = null;
}
if (lockManager != null) {
HippoServiceRegistry.unregisterService(lockManager, LockManager.class);
lockManager.destroy();
}
if (configurationService != null) {
HippoServiceRegistry.unregisterService(configurationService, ConfigurationService.class);
configurationService.stop();
}
if (lockManager != null) {
HippoServiceRegistry.unregisterService(lockManager, LockManager.class);
lockManager.destroy();
}
if (jackrabbitRepository != null) {
try {
jackrabbitRepository.shutdown();
......
......@@ -39,9 +39,7 @@ import org.apache.jackrabbit.core.config.RepositoryConfig;
import org.apache.jackrabbit.core.config.WorkspaceConfig;
import org.apache.jackrabbit.core.fs.FileSystem;
import org.apache.jackrabbit.core.id.NodeId;
import org.apache.jackrabbit.core.journal.DatabaseJournal;
import org.apache.jackrabbit.core.journal.ExternalRepositorySyncRevisionServiceImpl;
import org.apache.jackrabbit.core.journal.JournalConnectionHelperAccessorImpl;
import org.apache.jackrabbit.core.lock.LockManagerImpl;
import org.apache.jackrabbit.core.nodetype.NodeTypeRegistry;
import org.apache.jackrabbit.core.observation.ObservationDispatcher;
......@@ -61,7 +59,6 @@ import org.hippoecm.repository.query.lucene.ServicingSearchIndex;
import org.hippoecm.repository.security.HippoSecurityManager;
import org.onehippo.repository.InternalHippoRepository;
import org.onehippo.repository.journal.ExternalRepositorySyncRevisionService;
import org.onehippo.repository.journal.JournalConnectionHelperAccessor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
......@@ -76,7 +73,6 @@ public class RepositoryImpl extends org.apache.jackrabbit.core.RepositoryImpl im
protected boolean isStarted = false;
private ExternalRepositorySyncRevisionService externalRepositorySyncRevisionService;
private JournalConnectionHelperAccessor journalConnectionHelperAccessor;
protected RepositoryImpl(RepositoryConfig repConfig) throws RepositoryException {
super(repConfig);
......@@ -298,19 +294,6 @@ public class RepositoryImpl extends org.apache.jackrabbit.core.RepositoryImpl im
return externalRepositorySyncRevisionService;
}
/**
* @return the JournalConnectionHelperAccessor if a journal connection is available, otherwise null
* @throws RepositoryException
*/
public synchronized JournalConnectionHelperAccessor getJournalConnectionHelperAccessor() throws RepositoryException {
sanityCheck();
if (journalConnectionHelperAccessor == null) {
ClusterNode clusterNode = context.getClusterNode();
journalConnectionHelperAccessor = new JournalConnectionHelperAccessorImpl(clusterNode == null ? null : (DatabaseJournal)clusterNode.getJournal());
}
return journalConnectionHelperAccessor;
}
@Override
protected HippoWorkspaceInfo getWorkspaceInfo(final String workspaceName) throws RepositoryException {
return (HippoWorkspaceInfo) super.getWorkspaceInfo(workspaceName);
......
......@@ -18,41 +18,22 @@ package org.onehippo.cm.engine;
import java.util.concurrent.locks.ReentrantLock;
import javax.jcr.RepositoryException;
import javax.jcr.Session;
import javax.jcr.SimpleCredentials;
import javax.jcr.lock.LockException;
import org.onehippo.repository.locking.HippoLock;
import org.onehippo.repository.locking.HippoLockManager;
import org.onehippo.cms7.services.HippoServiceRegistry;
import org.onehippo.cms7.services.lock.LockManager;
import org.onehippo.cms7.services.lock.LockManagerUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.hippoecm.repository.api.HippoNodeType.HIPPO_LOCK;
import static org.hippoecm.repository.util.RepoUtils.getClusterNodeId;
import static org.onehippo.cm.engine.Constants.HCM_ROOT_PATH;
/**
* The ConfigurationLockManager is used to create and release a persistent {@link HippoLock} for accessing
* The ConfigurationLockManager is used to acquire and release a lock for accessing
* and modifying the ConfigurationModel in the repository, on LOCK_PATH.
* <p>
* Note the persistent lock management will be done with a separate/dedicated Session, impersonated from the Session
* passed into the constructor, which otherwise will not be used any further.</p>
* The {@link #lock} and {@link #unlock()} methods can be thread reentrant</p>
* <p>
* A {@link HippoLock} will be created with a default timeout of 30 seconds, and be kept alive until the lock is
* explicitly released through {@link #unlock}.</p>
* <p>
* The default timeout can be overridden through system parameter <b><code>hcm.lock.timeout</code></b> (seconds).</p>
* <p>
* Note that {@link HippoLock} requires a minimum timeout of 10 seconds to be able to keep it alive!</p>
* <p>
* When acquiring a {@link HippoLock} fails because of a LockException it will be re-attempted every 500 ms (indefinitely)
* until it succeeds.</p>
* </p>
* <p>
* The {@link #lock} and {@link #unlock()} methods can be thread reentrant and use an internal {@link ReentrantLock}
* for the actual creation and releasing of the {@link HippoLock}.</p>
* <p>
* Therefore, like with {@link ReentrantLock} the typical usage-pattern is/should be:</p>
* Therefore the typical usage-pattern is/should be:</p>
* <pre><code>
* lockManager.lock();
* try {
......@@ -67,91 +48,33 @@ public class ConfigurationLockManager {
private static final Logger log = LoggerFactory.getLogger(ConfigurationLockManager.class);
private static final long LOCK_ATTEMPT_INTERVAL = 500;
private static final long LOCK_TIMEOUT = Long.getLong("hcm.lock.timeout", 30);
public static final String LOCK_PATH = HCM_ROOT_PATH + "/" + HIPPO_LOCK;
public static final String LOCK_PATH = HCM_ROOT_PATH;
private final ReentrantLock reentrantLock = new ReentrantLock();
private final Session lockSession;
private final String lockOwnerId;
private final HippoLockManager hippoLockManager;
private HippoLock hippoLock;
public ConfigurationLockManager(final Session configurationSession) throws RepositoryException {
final SimpleCredentials credentials = new SimpleCredentials(configurationSession.getUserID(), new char[]{});
lockSession = configurationSession.impersonate(credentials);
lockSession.getWorkspace().getObservationManager().setUserData(Constants.HCM_ROOT);
lockOwnerId = getClusterNodeId(lockSession);
hippoLockManager = (HippoLockManager) lockSession.getWorkspace().getLockManager();
}
private void ensureIsLockable() throws RepositoryException {
if (!lockSession.nodeExists(LOCK_PATH)) {
lockSession.getNode(HCM_ROOT_PATH).addNode(HIPPO_LOCK, HIPPO_LOCK);
lockSession.save();
}
}
private final LockManager lockManager;
private boolean locked;
private boolean unlockHippoLock(final HippoLock lock) throws RepositoryException {
if (lock != null) {
try {
log.debug("Attempting to release lock");
lock.stopKeepAlive();
lockSession.refresh(false);
hippoLockManager.unlock(LOCK_PATH);
log.debug("Lock successfully released");
} catch (LockException e) {
log.warn("Current session no longer holds a lock");
} catch (RepositoryException e) {
log.error("Failed to unlock initialization processor: {}. " +
"Lock will time out within {} seconds", e.toString(), LOCK_TIMEOUT);
return false;
}
}
return true;
public ConfigurationLockManager() {
lockManager = HippoServiceRegistry.getService(LockManager.class);
}
public void lock() throws RepositoryException {
boolean locked = false;
reentrantLock.lock();
try {
if (hippoLock == null || !hippoLock.isLive()) {
ensureIsLockable();
while (true) {
log.debug("Attempting to obtain lock");
try {
hippoLock = hippoLockManager.lock(LOCK_PATH, false, false, LOCK_TIMEOUT, lockOwnerId);
log.debug("Lock successfully obtained");
try {
hippoLock.startKeepAlive();
break;
} catch (LockException e) {
if (log.isDebugEnabled()) {
log.warn("Failed to start lock keep-alive", e);
} else {
log.warn("Failed to start lock keep-alive: " + e);
}
throw new RepositoryException(e);
}
} catch (LockException e) {
log.debug("Obtaining lock failed, reattempting in {} ms", LOCK_ATTEMPT_INTERVAL);
try {
Thread.sleep(LOCK_ATTEMPT_INTERVAL);
} catch (InterruptedException ignore) {
}
}
if (!locked) {
try {
LockManagerUtils.waitForLock(lockManager, LOCK_PATH, LOCK_ATTEMPT_INTERVAL);
locked = true;
} catch (Exception e) {
if (e instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
} else {
if (!hippoLock.isLive()) {
throw new LockException("Lock no longer alive");
throw new RepositoryException(e);
} finally {
if (!locked) {
// In Exception context: don't hold on to the reentrantLock
reentrantLock.unlock();
}
}
locked = true;
} finally {
if (!locked) {
// In Exception context: don't hold on to the reentrantLock
reentrantLock.unlock();
}
}
}
......@@ -160,9 +83,8 @@ public class ConfigurationLockManager {
try {
if (reentrantLock.getHoldCount() < 3) {
if (reentrantLock.getHoldCount() == 2) {
if (unlockHippoLock(hippoLock)) {
hippoLock = null;
}
lockManager.unlock(LOCK_PATH);
locked = false;
} else {
// Error: unlock call without balanced lock call
// second unlock() call below will result in IllegalMonitorStateException!
......@@ -175,14 +97,8 @@ public class ConfigurationLockManager {
}
public void stop() {
try {
if (lockSession != null && lockSession.isLive()) {
unlockHippoLock(hippoLock);
hippoLock = null;
lockSession.logout();
}
} catch (RepositoryException e) {
log.warn("Failed to unlock or logout during stop", e);
}
// gracefully wait for a possibly still-running locked thread to finish
reentrantLock.lock();
reentrantLock.unlock();
}
}
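
The usage snippet in the class javadoc above is cut off at the hunk boundary; a minimal sketch of the full pattern it describes, assuming only the lock() and unlock() methods shown in this class (configurationLockManager being an instance of it):

configurationLockManager.lock();
try {
    // read or modify the ConfigurationModel guarded by LOCK_PATH
} finally {
    configurationLockManager.unlock();
}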
......@@ -132,7 +132,7 @@ public class ConfigurationServiceImpl implements InternalConfigurationService {
}
private void init(final StartRepositoryServicesTask startRepositoryServicesTask) throws RepositoryException {
lockManager = new ConfigurationLockManager(session);
lockManager = new ConfigurationLockManager();
baselineService = new ConfigurationBaselineService(lockManager);
configService = new ConfigurationConfigService();
contentService = new ConfigurationContentService(baselineService, new JcrContentProcessor());
......@@ -290,34 +290,15 @@ public class ConfigurationServiceImpl implements InternalConfigurationService {
autoExportService.close();
autoExportService = null;
}
boolean locked = false;
if (lockManager != null) {
if (runtimeConfigurationModel != null) {
try {
lockManager.lock();
locked = true;
// Ensure configurationModel resources are cleaned up (if any)
runtimeConfigurationModel.close();
} catch (Exception e) {
log.error("Failed to claim the configuration lock", e);
}
}
try {
if (runtimeConfigurationModel != null) {
try {
// Ensure configurationModel resources are cleaned up (if any)
runtimeConfigurationModel.close();
} catch (Exception e) {
log.error("Error closing runtime configuration", e);
}
}
runtimeConfigurationModel = null;
} finally {
if (locked) {
try {
lockManager.unlock();
} catch (Exception e) {
log.error("Failed to release the configuration lock", e);
}
log.error("Error closing runtime configuration", e);
}
}
runtimeConfigurationModel = null;
contentService = null;
configService = null;
baselineService = null;
......
/*
* Copyright 2017 Hippo B.V. (http://www.onehippo.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onehippo.repository.journal;
import org.apache.jackrabbit.core.util.db.ConnectionHelper;
public interface JournalConnectionHelperAccessor {
/**
* @return the {@link ConnectionHelper}, or {@code null} if not present (for example when not in a cluster setup)
*/
ConnectionHelper getConnectionHelper();
}
......@@ -28,6 +28,7 @@ import org.onehippo.cms7.services.lock.Lock;
import org.onehippo.cms7.services.lock.LockException;
import org.onehippo.cms7.services.lock.LockManagerException;
import org.onehippo.cms7.services.lock.LockResource;
import org.onehippo.cms7.services.lock.AlreadyLockedException;
import org.slf4j.Logger;
import static java.util.concurrent.TimeUnit.SECONDS;
......@@ -95,7 +96,7 @@ public abstract class AbstractLockManager implements InternalLockManager {
lock.increment();
return new LockResourceImpl(lock);
}
throw new LockException(String.format("This thread '%s' cannot lock '%s' : already locked by thread '%s'",
throw new AlreadyLockedException(String.format("This thread '%s' cannot lock '%s' : already locked by thread '%s'",
Thread.currentThread().getName(), key, lockThread.getName()));
}
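
Because AlreadyLockedException replaces a plain LockException in the throw above, callers can single it out as the retryable case. A hedged sketch, assuming AlreadyLockedException extends LockException and that LockManager#lock declares LockException (neither is shown in this diff):

try {
    lockManager.lock("examples:my-lock-key");
    try {
        // guarded work
    } finally {
        lockManager.unlock("examples:my-lock-key");
    }
} catch (AlreadyLockedException e) {
    // currently held by another thread or cluster node; retrying later
    // (for instance via LockManagerUtils.waitForLock) may succeed
} catch (LockException e) {
    // any other locking failure; a retry is unlikely to help
}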
......@@ -178,6 +179,7 @@ public abstract class AbstractLockManager implements InternalLockManager {
getLogger().info("Thread '{}' already stopped for lock '{}'.", localLock.getLockThread(), key);
return;
}
// signal the running thread that it should abort : This thread should then in turn call #unlock itself : That
// won't be done here
try {
......
/*
* Copyright 2017 Hippo B.V. (http://www.onehippo.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onehippo.repository.lock.db;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import javax.sql.DataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class DbHelper {
private static final Logger log = LoggerFactory.getLogger(DbHelper.class);
/**
* Creates the table {@code tableName} and throws a {@link RuntimeException} if it does not succeed. Note that
* if in the meantime another cluster node has created the table, this method does not throw an exception but just
* returns.
* @param dataSource
* @param tableName
*/
public static void createTableIfNeeded(final DataSource dataSource,
final String createTableStatement,
final String tableName,
final String... uniqueIndexes) throws RuntimeException {
try {
try (Connection connection = dataSource.getConnection()) {
final boolean tableExists = tableExists(connection, tableName);
if (!tableExists) {
log.info("Creating table {} ", tableName);
try (Statement statement = connection.createStatement()) {
statement.addBatch(String.format(createTableStatement, tableName));
for (String uniqueIndex : uniqueIndexes) {
statement.addBatch("CREATE UNIQUE INDEX " + tableName + "_idx_1 on " + tableName + "("+uniqueIndex+")");
}
statement.setQueryTimeout(10);
statement.executeBatch();
} catch (SQLException e) {
if (tableExists(connection, tableName)) {
log.debug("Table {} already created by another cluster node", tableName);
} else {
log.error("Failed to create table {}: {}", tableName, e.getMessage());
throw e;
}
}
}
}
} catch (SQLException e) {
log.error("Could not get a connection or could not create table");
throw new RuntimeException("Could not get a connection or could not create table", e);
}
}
public static boolean tableExists(final Connection connection, final String tableName) throws SQLException {
final ResultSet resultSet = connection.getMetaData().getTables(null,
null,
connection.getMetaData().storesUpperCaseIdentifiers() ? tableName.toUpperCase() : tableName, null);
return resultSet.next();
}
public static void close(final Connection connection) {
if (connection == null) {
return;
}
try {
connection.close();
} catch (SQLException e) {
log.error("Failed to close connection.", e);
}
}
public static void close(final Connection connection, final boolean originalAutoCommit) {
if (connection == null) {
return;
}
try {
connection.setAutoCommit(originalAutoCommit);
connection.close();
} catch (SQLException e) {
log.error("Failed to close connection.", e);
}
}
}
......@@ -20,15 +20,9 @@ import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.concurrent.TimeUnit;
import javax.sql.DataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.onehippo.repository.lock.db.DbHelper.close;
import static org.onehippo.repository.lock.db.DbLockManager.REFRESH_LOCK_STATEMENT;
import static org.onehippo.repository.lock.db.DbLockManager.REMOVE_OUTDATED_LOCKS;
/**
* Removes all locks that have been free for longer than a day
*/
......@@ -36,10 +30,10 @@ public class DbLockCleanupJanitor implements Runnable {
private static final Logger log = LoggerFactory.getLogger(DbLockCleanupJanitor.class);
private final DataSource dataSource;
private final DbLockManager dbLockManager;
public DbLockCleanupJanitor(final DataSource dataSource) {
this.dataSource = dataSource;
public DbLockCleanupJanitor(final DbLockManager dbLockManager) {
this.dbLockManager = dbLockManager;
}
@Override
......@@ -47,11 +41,11 @@ public class DbLockCleanupJanitor implements Runnable {
Connection connection = null;
boolean originalAutoCommit = false;
try {
connection = dataSource.getConnection();
connection = dbLockManager.getConnection();
originalAutoCommit = connection.getAutoCommit();
connection.setAutoCommit(true);
final PreparedStatement removeStatement = connection.prepareStatement(REMOVE_OUTDATED_LOCKS);
final PreparedStatement removeStatement = connection.prepareStatement(dbLockManager.getRemoveOutdatedStatement());
long dayAgoTime = System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(1, TimeUnit.DAYS);
removeStatement.setLong(1, dayAgoTime);
int updated = removeStatement.executeUpdate();
......@@ -60,7 +54,7 @@ public class DbLockCleanupJanitor implements Runnable {
} catch (SQLException e) {
log.error("Error while trying remove outdated locks", e);
} finally {
close(connection, originalAutoCommit);
dbLockManager.close(connection, originalAutoCommit);
}
}
}
......@@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onehippo.repository.lock;
package org.onehippo.repository.lock.db;
import java.sql.Connection;
import java.sql.SQLException;
......@@ -23,55 +23,56 @@ import javax.sql.DataSource;
import org.apache.jackrabbit.core.util.db.ConnectionHelper;
import org.apache.jackrabbit.core.util.db.ConnectionHelperDataSourceAccessor;
import org.hippoecm.repository.jackrabbit.RepositoryImpl;
import org.onehippo.cms7.services.lock.LockManager;
import org.onehippo.repository.lock.AbstractLockManager;
import org.onehippo.repository.lock.db.DbLockManager;
import org.onehippo.repository.lock.db.OracleDbLockManager;
import org.onehippo.repository.lock.memory.MemoryLockManager;
import org.apache.jackrabbit.core.util.db.OracleConnectionHelper;
public class LockManagerFactory {
public class DbLockManagerFactory {
private RepositoryImpl repositoryImpl;
public LockManagerFactory(final RepositoryImpl repositoryImpl) {
public static DbLockManager create(final ConnectionHelper connectionHelper, final String schemaObjectPrefix,
final boolean schemaCheckEnabled, final String clusterNodeId) throws RepositoryException {
return create(ConnectionHelperDataSourceAccessor.getDataSource(connectionHelper), connectionHelper,
schemaObjectPrefix, schemaCheckEnabled, clusterNodeId);
}
this.repositoryImpl = repositoryImpl;
public static DbLockManager create(final DataSource dataSource, final String schemaObjectPrefix,
final boolean schemaCheckEnabled, final String clusterNodeId) throws RepositoryException {
return create(dataSource, null, schemaObjectPrefix, schemaCheckEnabled, clusterNodeId);
}
/**
* Creates the {@link LockManager} which can be used for general purpose locking *not* using JCR at all
* Creates the {@link DbLockManager} which can be used for general purpose locking *not* using JCR at all
* @throws RuntimeException if the lock manager cannot be created, causing the repository startup to short-circuit
* @throws RepositoryException if a repository exception happened while creating the lock manager
*/
public AbstractLockManager create() throws RuntimeException, RepositoryException {
final ConnectionHelper journalConnectionHelper = repositoryImpl.getJournalConnectionHelperAccessor().getConnectionHelper();
if (journalConnectionHelper != null) {
final DataSource dataSource = ConnectionHelperDataSourceAccessor.getDataSource(journalConnectionHelper);
String clusterNodeId = repositoryImpl.getDescriptor("jackrabbit.cluster.id");
String dbProductName;
private static DbLockManager create(final DataSource dataSource, ConnectionHelper connectionHelper,