diff --git a/CHANGELOG.md b/CHANGELOG.md
index 57e365aada..6ef07d9123 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -281,7 +281,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [2.9.0] - 2020-11-06
### Added
-- Tracing api ([#1393](https://github.com/scm-manager/scm-manager/pull/#1393))
+- Tracing api ([#1393](https://github.com/scm-manager/scm-manager/pull/1393))
- Automatic user converter for external users ([#1380](https://github.com/scm-manager/scm-manager/pull/1380))
- Create _authenticated group on setup ([#1396](https://github.com/scm-manager/scm-manager/pull/1396))
- The name of the initial git branch can be configured and is set to `main` by default ([#1399](https://github.com/scm-manager/scm-manager/pull/1399))
diff --git a/docs/en/administration/workdir_caching.md b/docs/en/administration/workdir_caching.md
new file mode 100644
index 0000000000..7df2b557e0
--- /dev/null
+++ b/docs/en/administration/workdir_caching.md
@@ -0,0 +1,20 @@
+---
+title: Caching for Working Directories
+---
+
+SCM-Manager offers commands to modify repositories on the server side. For example this is used by the
+[Editor Plugin](https://www.scm-manager.org/plugins/scm-editor-plugin/) and the
+[Review Plugin](https://www.scm-manager.org/plugins/scm-review-plugin/). Without further configuration, this is done
+by cloning/checking out the repository temporarily, performing the change, creating a commit and pushing the changes
+back to the central repository. The larger the repositories, the longer this may take.
+
+To speed up such changes a lot, SCM-Manager offers a strategy where the local clones will be cached and reused for
+subsequent requests. This strategy caches up to a configurable number of clones (but at most one per repository).
+To enable this strategy, set the system property `scm.workingCopyPoolStrategy` to the value
+`sonia.scm.repository.work.SimpleCachingWorkingCopyPool`:
+
+```bash
+-Dscm.workingCopyPoolStrategy=sonia.scm.repository.work.SimpleCachingWorkingCopyPool
+```
+
+The maximum capacity of the cache can be set using the property `scm.workingCopyPoolSize` (the default is 5).
diff --git a/docs/en/navigation.yml b/docs/en/navigation.yml
index e0edf3418d..bdae6704ae 100644
--- a/docs/en/navigation.yml
+++ b/docs/en/navigation.yml
@@ -22,6 +22,7 @@
- /administration/logging/
- /administration/scm-server/
- /administration/reverse-proxies/
+ - /administration/workdir_caching/
- section: Development
entries:
diff --git a/gradle/changelog/simple_workdir_cache.yaml b/gradle/changelog/simple_workdir_cache.yaml
new file mode 100644
index 0000000000..7ff1960e21
--- /dev/null
+++ b/gradle/changelog/simple_workdir_cache.yaml
@@ -0,0 +1,2 @@
+- type: changed
+  description: The simple workdir cache has a maximum size, LRU semantics and blocks on parallel requests ([#1735](https://github.com/scm-manager/scm-manager/pull/1735))
diff --git a/scm-core/src/main/java/sonia/scm/repository/work/SimpleCachingWorkingCopyPool.java b/scm-core/src/main/java/sonia/scm/repository/work/SimpleCachingWorkingCopyPool.java
index a66e55aa4e..d57b3cfeed 100644
--- a/scm-core/src/main/java/sonia/scm/repository/work/SimpleCachingWorkingCopyPool.java
+++ b/scm-core/src/main/java/sonia/scm/repository/work/SimpleCachingWorkingCopyPool.java
@@ -24,35 +24,53 @@
package sonia.scm.repository.work;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
+import io.micrometer.core.instrument.Counter;
+import io.micrometer.core.instrument.MeterRegistry;
+import io.micrometer.core.instrument.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import sonia.scm.util.IOUtil;
import javax.inject.Inject;
import java.io.File;
+import java.util.LinkedHashMap;
import java.util.Map;
+import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import static java.lang.Integer.getInteger;
+import static java.util.Optional.empty;
+import static java.util.Optional.of;
/**
* This class is a simple implementation of the {@link WorkingCopyPool} to demonstrate,
- * how caching can work in the simplest way. For the first time a {@link WorkingCopy} is
+ * how caching can work in an LRU style. For the first time a {@link WorkingCopy} is
* requested for a repository with {@link #getWorkingCopy(SimpleWorkingCopyFactory.WorkingCopyContext)},
* this implementation fetches a new directory from the {@link WorkdirProvider}.
* On {@link #contextClosed(SimpleWorkingCopyFactory.WorkingCopyContext, File)},
- * the directory is not deleted, but put into a map with the repository id as key.
+ * the directory is not deleted, but put into a cache with the repository id as key.
* When a working copy is requested with {@link #getWorkingCopy(SimpleWorkingCopyFactory.WorkingCopyContext)}
* for a repository with such an existing directory, it is taken from the map, reclaimed and
* returned as {@link WorkingCopy}.
* If for one repository a working copy is requested, while another is in use already,
- * a second directory is requested from the {@link WorkdirProvider} for the second one.
- * If a context is closed with {@link #contextClosed(SimpleWorkingCopyFactory.WorkingCopyContext, File)}
- * and there already is a directory stored in the map for the repository,
- * the directory from the closed context simply is deleted.
+ * the process will wait until the other process has finished.
+ * The number of directories cached is limited. By default, directories are cached for
+ * {@value DEFAULT_WORKING_COPY_POOL_SIZE} repositories. This can be changed with the system
+ * property '{@value WORKING_COPY_POOL_SIZE_PROPERTY}' (if this is set to zero, no caching will
+ * take place; to cache the directories for each repository without eviction simply set this to a
+ * high enough value).
*
- * In general, this implementation should speed up things a bit, but one has to take into
- * account, that there is no monitoring of diskspace. So you have to make sure, that
- * there is enough space for a clone of each repository in the working dir.
+ * The usage of this pool has to be enabled by setting the system property `scm.workingCopyPoolStrategy`
+ * to 'sonia.scm.repository.work.SimpleCachingWorkingCopyPool'.
+ *
+ * In general, this implementation should speed up modifications inside SCM-Manager performed by
+ * the editor plugin or the review plugin, but one has to take into
+ * account that the space needed for repositories is multiplied. So you have to make sure that
+ * there is enough space for clones of the repository.
*
* Possible enhancements:
*
@@ -65,49 +83,142 @@ import java.util.concurrent.ConcurrentHashMap;
*/
public class SimpleCachingWorkingCopyPool implements WorkingCopyPool {
+ public static final int DEFAULT_WORKING_COPY_POOL_SIZE = 5;
+ public static final String WORKING_COPY_POOL_SIZE_PROPERTY = "scm.workingCopyPoolSize";
+
private static final Logger LOG = LoggerFactory.getLogger(SimpleCachingWorkingCopyPool.class);
- private final Map workdirs = new ConcurrentHashMap<>();
-
private final WorkdirProvider workdirProvider;
+ private final LinkedHashMap workdirs;
+ private final Map locks;
+ private final boolean cacheEnabled;
+
+ private final Counter cacheHitCounter;
+ private final Counter cacheMissCounter;
+ private final Counter reclaimFailureCounter;
+ private final Counter overflowCounter;
+ private final Timer parallelWaitTimer;
+ private final Timer reclaimTimer;
+ private final Timer initializeTimer;
+ private final Timer deleteTimer;
@Inject
- public SimpleCachingWorkingCopyPool(WorkdirProvider workdirProvider) {
+ public SimpleCachingWorkingCopyPool(WorkdirProvider workdirProvider, MeterRegistry meterRegistry) {
+ this(getInteger(WORKING_COPY_POOL_SIZE_PROPERTY, DEFAULT_WORKING_COPY_POOL_SIZE), workdirProvider, meterRegistry);
+ }
+
+ @VisibleForTesting
+ SimpleCachingWorkingCopyPool(int size, WorkdirProvider workdirProvider, MeterRegistry meterRegistry) {
this.workdirProvider = workdirProvider;
+ this.workdirs = new LruMap(size);
+ this.locks = new ConcurrentHashMap<>();
+ cacheEnabled = size > 0;
+ cacheHitCounter = Counter
+ .builder("scm.workingcopy.pool.cache.hit")
+ .description("The amount of cache hits for the working copy pool")
+ .register(meterRegistry);
+ cacheMissCounter = Counter
+ .builder("scm.workingcopy.pool.cache.miss")
+ .description("The amount of cache misses for the working copy pool")
+ .register(meterRegistry);
+ reclaimFailureCounter = Counter
+ .builder("scm.workingcopy.pool.reclaim.failure")
+ .description("The amount of failed reclaim processes from pool")
+ .register(meterRegistry);
+ overflowCounter = Counter
+ .builder("scm.workingcopy.pool.cache.overflow")
+ .description("The amount of discarded working copies from pool due to cache overflow")
+ .register(meterRegistry);
+ parallelWaitTimer = Timer
+ .builder("scm.workingcopy.pool.parallel")
+ .description("Duration of blocking waits for available working copies in pool")
+ .register(meterRegistry);
+ reclaimTimer = Timer
+ .builder("scm.workingcopy.pool.reclaim.duration")
+ .description("Duration of reclaiming existing working copies in pool")
+ .register(meterRegistry);
+ initializeTimer = Timer
+ .builder("scm.workingcopy.pool.initialize.duration")
+ .description("Duration of initialization of working copies in pool")
+ .register(meterRegistry);
+ deleteTimer = Timer
+ .builder("scm.workingcopy.pool.delete.duration")
+ .description("Duration of deletes of working copies from pool")
+ .register(meterRegistry);
}
@Override
- public WorkingCopy getWorkingCopy(SimpleWorkingCopyFactory.WorkingCopyContext workingCopyContext) {
+ public WorkingCopy getWorkingCopy(SimpleWorkingCopyFactory.WorkingCopyContext context) {
+ Lock lock = getLock(context);
+ parallelWaitTimer.record(lock::lock);
+ try {
+ return getWorkingCopyFromPoolOrCreate(context);
+ } catch (RuntimeException e) {
+ lock.unlock();
+ throw e;
+ }
+ }
+
+ private WorkingCopy getWorkingCopyFromPoolOrCreate(SimpleWorkingCopyFactory.WorkingCopyContext workingCopyContext) {
String id = workingCopyContext.getScmRepository().getId();
- File existingWorkdir = workdirs.remove(id);
+ File existingWorkdir;
+ synchronized (workdirs) {
+ existingWorkdir = workdirs.remove(id);
+ }
if (existingWorkdir != null) {
- Stopwatch stopwatch = Stopwatch.createStarted();
- try {
- WorkingCopy reclaimed = workingCopyContext.reclaim(existingWorkdir);
- LOG.debug("reclaimed workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), existingWorkdir, stopwatch.stop());
- return reclaimed;
- } catch (SimpleWorkingCopyFactory.ReclaimFailedException e) {
- LOG.debug("failed to reclaim workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), existingWorkdir, stopwatch.stop(), e);
- deleteWorkdir(existingWorkdir);
+ Optional> reclaimedWorkingCopy = tryToReclaim(workingCopyContext, existingWorkdir);
+ if (reclaimedWorkingCopy.isPresent()) {
+ cacheHitCounter.increment();
+ return reclaimedWorkingCopy.get();
}
+ } else {
+ cacheMissCounter.increment();
}
return createNewWorkingCopy(workingCopyContext);
}
+ private Optional> tryToReclaim(SimpleWorkingCopyFactory.WorkingCopyContext workingCopyContext, File existingWorkdir) {
+ return reclaimTimer.record(() -> {
+ Stopwatch stopwatch = Stopwatch.createStarted();
+ try {
+ WorkingCopy reclaimed = workingCopyContext.reclaim(existingWorkdir);
+ LOG.debug("reclaimed workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), existingWorkdir, stopwatch.stop());
+ return of(reclaimed);
+ } catch (Exception e) {
+ LOG.debug("failed to reclaim workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), existingWorkdir, stopwatch.stop(), e);
+ deleteWorkdir(existingWorkdir);
+ reclaimFailureCounter.increment();
+ return empty();
+ }
+ });
+ }
+
private WorkingCopy createNewWorkingCopy(SimpleWorkingCopyFactory.WorkingCopyContext workingCopyContext) {
- Stopwatch stopwatch = Stopwatch.createStarted();
- File newWorkdir = workdirProvider.createNewWorkdir(workingCopyContext.getScmRepository().getId());
- WorkingCopy parentAndClone = workingCopyContext.initialize(newWorkdir);
- LOG.debug("initialized new workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), newWorkdir, stopwatch.stop());
- return parentAndClone;
+ return initializeTimer.record(() -> {
+ Stopwatch stopwatch = Stopwatch.createStarted();
+ File newWorkdir = workdirProvider.createNewWorkdir(workingCopyContext.getScmRepository().getId());
+ WorkingCopy parentAndClone = workingCopyContext.initialize(newWorkdir);
+ LOG.debug("initialized new workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), newWorkdir, stopwatch.stop());
+ return parentAndClone;
+ });
}
@Override
public void contextClosed(SimpleWorkingCopyFactory, ?, ?>.WorkingCopyContext workingCopyContext, File workdir) {
- String id = workingCopyContext.getScmRepository().getId();
- File putResult = workdirs.putIfAbsent(id, workdir);
- if (putResult != null && putResult != workdir) {
+ try {
+ putWorkingCopyToCache(workingCopyContext, workdir);
+ } finally {
+ getLock(workingCopyContext).unlock();
+ }
+ }
+
+ private void putWorkingCopyToCache(SimpleWorkingCopyFactory, ?, ?>.WorkingCopyContext workingCopyContext, File workdir) {
+ if (!cacheEnabled) {
deleteWorkdir(workdir);
+ return;
+ }
+ synchronized (workdirs) {
+ workdirs.put(workingCopyContext.getScmRepository().getId(), workdir);
}
}
@@ -118,8 +229,33 @@ public class SimpleCachingWorkingCopyPool implements WorkingCopyPool {
}
private void deleteWorkdir(File workdir) {
+ LOG.debug("deleting old workdir {}", workdir);
if (workdir.exists()) {
- IOUtil.deleteSilently(workdir);
+ deleteTimer.record(() -> IOUtil.deleteSilently(workdir));
+ }
+ }
+
+ private Lock getLock(SimpleWorkingCopyFactory.WorkingCopyContext context) {
+ return locks.computeIfAbsent(context.getScmRepository().getId(), id -> new ReentrantLock(true));
+ }
+
+ @SuppressWarnings("java:S2160") // no need for equals here
+ private class LruMap extends LinkedHashMap {
+ private final int maxSize;
+
+ public LruMap(int maxSize) {
+ super(maxSize);
+ this.maxSize = maxSize;
+ }
+
+ @Override
+ protected boolean removeEldestEntry(Map.Entry eldest) {
+ if (size() > maxSize) {
+ overflowCounter.increment();
+ deleteWorkdir(eldest.getValue());
+ return true;
+ }
+ return false;
}
}
}
diff --git a/scm-core/src/main/java/sonia/scm/repository/work/WorkdirProvider.java b/scm-core/src/main/java/sonia/scm/repository/work/WorkdirProvider.java
index 52124f6c9a..385b6a5dd6 100644
--- a/scm-core/src/main/java/sonia/scm/repository/work/WorkdirProvider.java
+++ b/scm-core/src/main/java/sonia/scm/repository/work/WorkdirProvider.java
@@ -24,15 +24,25 @@
package sonia.scm.repository.work;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import sonia.scm.plugin.Extension;
import sonia.scm.repository.RepositoryLocationResolver;
+import sonia.scm.util.IOUtil;
import javax.inject.Inject;
+import javax.servlet.ServletContextEvent;
+import javax.servlet.ServletContextListener;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.util.Arrays;
-public class WorkdirProvider {
+@Extension
+public class WorkdirProvider implements ServletContextListener {
+
+ private static final Logger LOG = LoggerFactory.getLogger(WorkdirProvider.class);
private final File rootDirectory;
private final RepositoryLocationResolver repositoryLocationResolver;
@@ -58,21 +68,58 @@ public class WorkdirProvider {
public File createNewWorkdir(String repositoryId) {
if (useRepositorySpecificDir) {
- return createWorkDir(repositoryLocationResolver.forClass(Path.class).getLocation(repositoryId).resolve("work").toFile());
+ Path repositoryLocation = repositoryLocationResolver.forClass(Path.class).getLocation(repositoryId);
+ File workDirectoryForRepositoryLocation = getWorkDirectoryForRepositoryLocation(repositoryLocation);
+ LOG.debug("creating work dir for repository {} in relative path {}", repositoryId, workDirectoryForRepositoryLocation);
+ return createWorkDir(workDirectoryForRepositoryLocation);
} else {
+ LOG.debug("creating work dir for repository {} in global path", repositoryId);
return createNewWorkdir();
}
}
+ private File getWorkDirectoryForRepositoryLocation(Path repositoryLocation) {
+ return repositoryLocation.resolve("work").toFile();
+ }
+
private File createWorkDir(File baseDirectory) {
// recreate base directory when it may be deleted (see https://github.com/scm-manager/scm-manager/issues/1493 for example)
if (!baseDirectory.exists() && !baseDirectory.mkdirs()) {
throw new WorkdirCreationException(baseDirectory.toString());
}
try {
- return Files.createTempDirectory(baseDirectory.toPath(),"work-").toFile();
+ File newWorkDir = Files.createTempDirectory(baseDirectory.toPath(), "work-").toFile();
+ LOG.debug("created new work dir {}", newWorkDir);
+ return newWorkDir;
} catch (IOException e) {
throw new WorkdirCreationException(baseDirectory.toString(), e);
}
}
+
+ @Override
+ public void contextInitialized(ServletContextEvent sce) {
+ deleteWorkDirs();
+ }
+
+ @Override
+ public void contextDestroyed(ServletContextEvent sce) {
+ deleteWorkDirs();
+ }
+
+ private void deleteWorkDirs() {
+ deleteWorkDirs(rootDirectory);
+ repositoryLocationResolver.forClass(Path.class).forAllLocations(
+ (repo, repositoryLocation) -> deleteWorkDirs(getWorkDirectoryForRepositoryLocation(repositoryLocation))
+ );
+ }
+
+ private void deleteWorkDirs(File root) {
+ File[] workDirs = root.listFiles();
+ if (workDirs != null) {
+ LOG.info("deleting {} old work dirs in {}", workDirs.length, root);
+ Arrays.stream(workDirs)
+ .filter(File::isDirectory)
+ .forEach(IOUtil::deleteSilently);
+ }
+ }
}
diff --git a/scm-core/src/test/java/sonia/scm/repository/work/SimpleCachingWorkingCopyPoolTest.java b/scm-core/src/test/java/sonia/scm/repository/work/SimpleCachingWorkingCopyPoolTest.java
index cda11dc080..1f5096047a 100644
--- a/scm-core/src/test/java/sonia/scm/repository/work/SimpleCachingWorkingCopyPoolTest.java
+++ b/scm-core/src/test/java/sonia/scm/repository/work/SimpleCachingWorkingCopyPoolTest.java
@@ -24,14 +24,17 @@
package sonia.scm.repository.work;
+import io.micrometer.core.instrument.MeterRegistry;
+import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.io.TempDir;
-import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import sonia.scm.repository.Repository;
+import sonia.scm.repository.work.SimpleWorkingCopyFactory.ReclaimFailedException;
import java.io.File;
import java.nio.file.Path;
@@ -40,7 +43,6 @@ import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.lenient;
-import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@@ -51,66 +53,121 @@ class SimpleCachingWorkingCopyPoolTest {
@Mock
WorkdirProvider workdirProvider;
- @InjectMocks
+ MeterRegistry meterRegistry = new SimpleMeterRegistry();
+
SimpleCachingWorkingCopyPool simpleCachingWorkingCopyPool;
@Mock
SimpleWorkingCopyFactory