LRU semantic for workdir cache (#1735)

Introduces a maximum size for the simple workdir cache. On cache overflow workdirs are evicted using an LRU strategy.
Furthermore parallel requests for the same repository will now block until the workdir is released.
This commit is contained in:
René Pfeuffer
2021-07-28 07:54:37 +02:00
committed by GitHub
parent f2cc9f67ac
commit ad6000722d
17 changed files with 578 additions and 97 deletions

View File

@@ -281,7 +281,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [2.9.0] - 2020-11-06
### Added
- Tracing api ([#1393](https://github.com/scm-manager/scm-manager/pull/#1393))
- Tracing api ([#1393](https://github.com/scm-manager/scm-manager/pull/1393))
- Automatic user converter for external users ([#1380](https://github.com/scm-manager/scm-manager/pull/1380))
- Create _authenticated group on setup ([#1396](https://github.com/scm-manager/scm-manager/pull/1396))
- The name of the initial git branch can be configured and is set to `main` by default ([#1399](https://github.com/scm-manager/scm-manager/pull/1399))

View File

@@ -0,0 +1,20 @@
---
title: Caching for Working Directories
---
SCM-Manager offers commands to modify repositories on the server side. For example this is used by the
[Editor Plugin](https://www.scm-manager.org/plugins/scm-editor-plugin/) and the
[Review Plugin](https://www.scm-manager.org/plugins/scm-review-plugin/). Without further configuration, this is done
by cloning/checking out the repository temporarily, performing the change, creating a commit and pushing the changes
back to the central repository. The larger the repositories, the longer this may take.
To speed up such changes a lot, SCM-Manager offers a strategy where the local clones will be cached and reused for
subsequent requests. This strategy caches up to a configurable number of clones (but at most one per repository).
To enable this strategy, set the system property `scm.workingCopyPoolStrategy` to the value
`sonia.scm.repository.work.SimpleCachingWorkingCopyPool`:
```bash
-Dscm.workingCopyPoolStrategy=sonia.scm.repository.work.SimpleCachingWorkingCopyPool
```
The maximum capacity of the cache can be set using the property `scm.workingCopyPoolSize` (the default is 5).

View File

@@ -22,6 +22,7 @@
- /administration/logging/
- /administration/scm-server/
- /administration/reverse-proxies/
- /administration/workdir_caching/
- section: Development
entries:

View File

@@ -0,0 +1,2 @@
- type: changed
description: The simple workdir cache has a maximum size, an LRU semantic and blocks on parallel requests ([#1735](https://github.com/scm-manager/scm-manager/pull/1735))

View File

@@ -24,35 +24,53 @@
package sonia.scm.repository.work;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import sonia.scm.util.IOUtil;
import javax.inject.Inject;
import java.io.File;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import static java.lang.Integer.getInteger;
import static java.util.Optional.empty;
import static java.util.Optional.of;
/**
* This class is a simple implementation of the {@link WorkingCopyPool} to demonstrate,
* how caching can work in the simplest way. For the first time a {@link WorkingCopy} is
* how caching can work in an LRU style. For the first time a {@link WorkingCopy} is
* requested for a repository with {@link #getWorkingCopy(SimpleWorkingCopyFactory.WorkingCopyContext)},
* this implementation fetches a new directory from the {@link WorkdirProvider}.
* On {@link #contextClosed(SimpleWorkingCopyFactory.WorkingCopyContext, File)},
* the directory is not deleted, but put into a map with the repository id as key.
* the directory is not deleted, but put into a cache with the repository id as key.
* When a working copy is requested with {@link #getWorkingCopy(SimpleWorkingCopyFactory.WorkingCopyContext)}
* for a repository with such an existing directory, it is taken from the map, reclaimed and
* returned as {@link WorkingCopy}.
* If for one repository a working copy is requested, while another is in use already,
* a second directory is requested from the {@link WorkdirProvider} for the second one.
* If a context is closed with {@link #contextClosed(SimpleWorkingCopyFactory.WorkingCopyContext, File)}
* and there already is a directory stored in the map for the repository,
* the directory from the closed context simply is deleted.
* the process will wait until the other process has finished.
* The number of directories cached is limited. By default, directories are cached for
{@value DEFAULT_WORKING_COPY_POOL_SIZE} repositories. This can be changed with the system
* property '{@value WORKING_COPY_POOL_SIZE_PROPERTY}' (if this is set to zero, no caching will
* take place; to cache the directories for each repository without eviction simply set this to a
* high enough value).
* <br>
* In general, this implementation should speed up things a bit, but one has to take into
* account, that there is no monitoring of diskspace. So you have to make sure, that
* there is enough space for a clone of each repository in the working dir.
* The usage of this pool has to be enabled by setting the system property `scm.workingCopyPoolStrategy`
* to 'sonia.scm.repository.work.SimpleCachingWorkingCopyPool'.
* <br>
* In general, this implementation should speed up modifications inside SCM-Manager performed by
* the editor plugin or the review plugin, but one has to take into
* account, that the space needed for repositories is multiplied. So you have to make sure, that
* there is enough space for clones of the repository.
* <br>
* Possible enhancements:
* <ul>
@@ -65,49 +83,142 @@ import java.util.concurrent.ConcurrentHashMap;
*/
public class SimpleCachingWorkingCopyPool implements WorkingCopyPool {
public static final int DEFAULT_WORKING_COPY_POOL_SIZE = 5;
public static final String WORKING_COPY_POOL_SIZE_PROPERTY = "scm.workingCopyPoolSize";
private static final Logger LOG = LoggerFactory.getLogger(SimpleCachingWorkingCopyPool.class);
private final Map<String, File> workdirs = new ConcurrentHashMap<>();
private final WorkdirProvider workdirProvider;
private final LinkedHashMap<String, File> workdirs;
private final Map<String, Lock> locks;
private final boolean cacheEnabled;
private final Counter cacheHitCounter;
private final Counter cacheMissCounter;
private final Counter reclaimFailureCounter;
private final Counter overflowCounter;
private final Timer parallelWaitTimer;
private final Timer reclaimTimer;
private final Timer initializeTimer;
private final Timer deleteTimer;
@Inject
public SimpleCachingWorkingCopyPool(WorkdirProvider workdirProvider) {
public SimpleCachingWorkingCopyPool(WorkdirProvider workdirProvider, MeterRegistry meterRegistry) {
this(getInteger(WORKING_COPY_POOL_SIZE_PROPERTY, DEFAULT_WORKING_COPY_POOL_SIZE), workdirProvider, meterRegistry);
}
@VisibleForTesting
SimpleCachingWorkingCopyPool(int size, WorkdirProvider workdirProvider, MeterRegistry meterRegistry) {
this.workdirProvider = workdirProvider;
this.workdirs = new LruMap(size);
this.locks = new ConcurrentHashMap<>();
cacheEnabled = size > 0;
cacheHitCounter = Counter
.builder("scm.workingcopy.pool.cache.hit")
.description("The amount of cache hits for the working copy pool")
.register(meterRegistry);
cacheMissCounter = Counter
.builder("scm.workingcopy.pool.cache.miss")
.description("The amount of cache misses for the working copy pool")
.register(meterRegistry);
reclaimFailureCounter = Counter
.builder("scm.workingcopy.pool.reclaim.failure")
.description("The amount of failed reclaim processes from pool")
.register(meterRegistry);
overflowCounter = Counter
.builder("scm.workingcopy.pool.cache.overflow")
.description("The amount of discarded working copies from pool due to cache overflow")
.register(meterRegistry);
parallelWaitTimer = Timer
.builder("scm.workingcopy.pool.parallel")
.description("Duration of blocking waits for available working copies in pool")
.register(meterRegistry);
reclaimTimer = Timer
.builder("scm.workingcopy.pool.reclaim.duration")
.description("Duration of reclaiming existing working copies in pool")
.register(meterRegistry);
initializeTimer = Timer
.builder("scm.workingcopy.pool.initialize.duration")
.description("Duration of initialization of working copies in pool")
.register(meterRegistry);
deleteTimer = Timer
.builder("scm.workingcopy.pool.delete.duration")
.description("Duration of deletes of working copies from pool")
.register(meterRegistry);
}
@Override
public <R, W> WorkingCopy<R, W> getWorkingCopy(SimpleWorkingCopyFactory<R, W, ?>.WorkingCopyContext workingCopyContext) {
/**
 * Provides a working copy for the repository of the given context.
 * A fair, per-repository lock is acquired first, so parallel requests for the
 * same repository block until the previous caller releases its working copy
 * (the lock is released in {@link #contextClosed}).
 */
public <R, W> WorkingCopy<R, W> getWorkingCopy(SimpleWorkingCopyFactory<R, W, ?>.WorkingCopyContext context) {
Lock lock = getLock(context);
// measure how long this request had to wait for a concurrent request on the same repository
parallelWaitTimer.record(lock::lock);
try {
return getWorkingCopyFromPoolOrCreate(context);
} catch (RuntimeException e) {
// if creation/reclaim fails, contextClosed will never be called for this
// context, so the lock has to be released here to avoid a deadlock
lock.unlock();
throw e;
}
}
private <R, W> WorkingCopy<R, W> getWorkingCopyFromPoolOrCreate(SimpleWorkingCopyFactory<R, W, ?>.WorkingCopyContext workingCopyContext) {
String id = workingCopyContext.getScmRepository().getId();
File existingWorkdir = workdirs.remove(id);
File existingWorkdir;
synchronized (workdirs) {
existingWorkdir = workdirs.remove(id);
}
if (existingWorkdir != null) {
Stopwatch stopwatch = Stopwatch.createStarted();
try {
WorkingCopy<R, W> reclaimed = workingCopyContext.reclaim(existingWorkdir);
LOG.debug("reclaimed workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), existingWorkdir, stopwatch.stop());
return reclaimed;
} catch (SimpleWorkingCopyFactory.ReclaimFailedException e) {
LOG.debug("failed to reclaim workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), existingWorkdir, stopwatch.stop(), e);
deleteWorkdir(existingWorkdir);
Optional<WorkingCopy<R, W>> reclaimedWorkingCopy = tryToReclaim(workingCopyContext, existingWorkdir);
if (reclaimedWorkingCopy.isPresent()) {
cacheHitCounter.increment();
return reclaimedWorkingCopy.get();
}
} else {
cacheMissCounter.increment();
}
return createNewWorkingCopy(workingCopyContext);
}
/**
 * Tries to reuse an existing cached working directory for the given context.
 * Returns an empty optional if reclaiming fails for any reason; in that case
 * the stale directory is deleted and the caller falls back to creating a new
 * working copy.
 */
private <R, W> Optional<WorkingCopy<R, W>> tryToReclaim(SimpleWorkingCopyFactory<R, W, ?>.WorkingCopyContext workingCopyContext, File existingWorkdir) {
return reclaimTimer.record(() -> {
Stopwatch stopwatch = Stopwatch.createStarted();
try {
WorkingCopy<R, W> reclaimed = workingCopyContext.reclaim(existingWorkdir);
LOG.debug("reclaimed workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), existingWorkdir, stopwatch.stop());
return of(reclaimed);
// deliberately broad: any failure (not only ReclaimFailedException) must not
// break the request, we simply discard the cached directory and start fresh
} catch (Exception e) {
LOG.debug("failed to reclaim workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), existingWorkdir, stopwatch.stop(), e);
deleteWorkdir(existingWorkdir);
reclaimFailureCounter.increment();
return empty();
}
});
}
private <R, W> WorkingCopy<R, W> createNewWorkingCopy(SimpleWorkingCopyFactory<R, W, ?>.WorkingCopyContext workingCopyContext) {
Stopwatch stopwatch = Stopwatch.createStarted();
File newWorkdir = workdirProvider.createNewWorkdir(workingCopyContext.getScmRepository().getId());
WorkingCopy<R, W> parentAndClone = workingCopyContext.initialize(newWorkdir);
LOG.debug("initialized new workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), newWorkdir, stopwatch.stop());
return parentAndClone;
return initializeTimer.record(() -> {
Stopwatch stopwatch = Stopwatch.createStarted();
File newWorkdir = workdirProvider.createNewWorkdir(workingCopyContext.getScmRepository().getId());
WorkingCopy<R, W> parentAndClone = workingCopyContext.initialize(newWorkdir);
LOG.debug("initialized new workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), newWorkdir, stopwatch.stop());
return parentAndClone;
});
}
@Override
public void contextClosed(SimpleWorkingCopyFactory<?, ?, ?>.WorkingCopyContext workingCopyContext, File workdir) {
String id = workingCopyContext.getScmRepository().getId();
File putResult = workdirs.putIfAbsent(id, workdir);
if (putResult != null && putResult != workdir) {
try {
putWorkingCopyToCache(workingCopyContext, workdir);
} finally {
getLock(workingCopyContext).unlock();
}
}
/**
 * Stores the released working directory in the cache, or deletes it right
 * away when caching is disabled (pool size configured as zero).
 */
private void putWorkingCopyToCache(SimpleWorkingCopyFactory<?, ?, ?>.WorkingCopyContext workingCopyContext, File workdir) {
if (!cacheEnabled) {
deleteWorkdir(workdir);
return;
}
// LinkedHashMap is not thread safe, so all structural access is synchronized on the map
synchronized (workdirs) {
workdirs.put(workingCopyContext.getScmRepository().getId(), workdir);
}
}
@@ -118,8 +229,33 @@ public class SimpleCachingWorkingCopyPool implements WorkingCopyPool {
}
private void deleteWorkdir(File workdir) {
LOG.debug("deleting old workdir {}", workdir);
if (workdir.exists()) {
IOUtil.deleteSilently(workdir);
deleteTimer.record(() -> IOUtil.deleteSilently(workdir));
}
}
/**
 * Returns the lock for the repository of the given context, creating a fair
 * {@link ReentrantLock} on first use so waiting requests are served in order.
 * NOTE(review): locks are never removed from the map, so it grows with the
 * number of distinct repositories ever used — presumably acceptable, verify.
 */
private <R, W> Lock getLock(SimpleWorkingCopyFactory<R, W, ?>.WorkingCopyContext context) {
return locks.computeIfAbsent(context.getScmRepository().getId(), id -> new ReentrantLock(true));
}
@SuppressWarnings("java:S2160") // no need for equals here
/**
 * Size-bounded map of repository id to cached working directory.
 * LRU order is maintained externally: entries are removed on checkout and
 * re-inserted on release, so the insertion order of this map reflects recency
 * of use. When the size limit is exceeded, the eldest (least recently
 * released) directory is deleted from disk and evicted from the map.
 */
private class LruMap extends LinkedHashMap<String, File> {
// maximum number of cached working directories
private final int maxSize;
public LruMap(int maxSize) {
super(maxSize);
this.maxSize = maxSize;
}
@Override
protected boolean removeEldestEntry(Map.Entry<String, File> eldest) {
if (size() > maxSize) {
// count the eviction and remove the evicted working directory from disk
overflowCounter.increment();
deleteWorkdir(eldest.getValue());
return true;
}
return false;
}
}
}

View File

@@ -24,15 +24,25 @@
package sonia.scm.repository.work;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import sonia.scm.plugin.Extension;
import sonia.scm.repository.RepositoryLocationResolver;
import sonia.scm.util.IOUtil;
import javax.inject.Inject;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
public class WorkdirProvider {
@Extension
public class WorkdirProvider implements ServletContextListener {
private static final Logger LOG = LoggerFactory.getLogger(WorkdirProvider.class);
private final File rootDirectory;
private final RepositoryLocationResolver repositoryLocationResolver;
@@ -58,21 +68,58 @@ public class WorkdirProvider {
public File createNewWorkdir(String repositoryId) {
if (useRepositorySpecificDir) {
return createWorkDir(repositoryLocationResolver.forClass(Path.class).getLocation(repositoryId).resolve("work").toFile());
Path repositoryLocation = repositoryLocationResolver.forClass(Path.class).getLocation(repositoryId);
File workDirectoryForRepositoryLocation = getWorkDirectoryForRepositoryLocation(repositoryLocation);
LOG.debug("creating work dir for repository {} in relative path {}", repositoryId, workDirectoryForRepositoryLocation);
return createWorkDir(workDirectoryForRepositoryLocation);
} else {
LOG.debug("creating work dir for repository {} in global path", repositoryId);
return createNewWorkdir();
}
}
// Resolves the "work" sub directory inside the given repository location.
private File getWorkDirectoryForRepositoryLocation(Path repositoryLocation) {
return repositoryLocation.resolve("work").toFile();
}
private File createWorkDir(File baseDirectory) {
// recreate base directory when it may be deleted (see https://github.com/scm-manager/scm-manager/issues/1493 for example)
if (!baseDirectory.exists() && !baseDirectory.mkdirs()) {
throw new WorkdirCreationException(baseDirectory.toString());
}
try {
return Files.createTempDirectory(baseDirectory.toPath(),"work-").toFile();
File newWorkDir = Files.createTempDirectory(baseDirectory.toPath(), "work-").toFile();
LOG.debug("created new work dir {}", newWorkDir);
return newWorkDir;
} catch (IOException e) {
throw new WorkdirCreationException(baseDirectory.toString(), e);
}
}
@Override
// Removes leftover working directories from a previous (possibly crashed) run on startup.
public void contextInitialized(ServletContextEvent sce) {
deleteWorkDirs();
}
@Override
// Cleans up all working directories on shutdown, so no stale clones remain on disk.
public void contextDestroyed(ServletContextEvent sce) {
deleteWorkDirs();
}
/**
 * Deletes work directories from the global root directory and from the
 * "work" directory of every known repository location.
 */
private void deleteWorkDirs() {
deleteWorkDirs(rootDirectory);
repositoryLocationResolver.forClass(Path.class).forAllLocations(
(repo, repositoryLocation) -> deleteWorkDirs(getWorkDirectoryForRepositoryLocation(repositoryLocation))
);
}
/**
 * Silently deletes all directories directly beneath the given root.
 * A null result from listFiles (root missing or not a directory) is ignored.
 */
private void deleteWorkDirs(File root) {
File[] workDirs = root.listFiles();
if (workDirs != null) {
LOG.info("deleting {} old work dirs in {}", workDirs.length, root);
Arrays.stream(workDirs)
.filter(File::isDirectory)
.forEach(IOUtil::deleteSilently);
}
}
}

View File

@@ -24,14 +24,17 @@
package sonia.scm.repository.work;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.io.TempDir;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import sonia.scm.repository.Repository;
import sonia.scm.repository.work.SimpleWorkingCopyFactory.ReclaimFailedException;
import java.io.File;
import java.nio.file.Path;
@@ -40,7 +43,6 @@ import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@@ -51,66 +53,121 @@ class SimpleCachingWorkingCopyPoolTest {
@Mock
WorkdirProvider workdirProvider;
@InjectMocks
MeterRegistry meterRegistry = new SimpleMeterRegistry();
SimpleCachingWorkingCopyPool simpleCachingWorkingCopyPool;
@Mock
SimpleWorkingCopyFactory<Object, Path, ?>.WorkingCopyContext workingCopyContext;
@BeforeEach
void initContext() throws SimpleWorkingCopyFactory.ReclaimFailedException {
void initContext() throws ReclaimFailedException {
lenient().when(workingCopyContext.initialize(any()))
.thenAnswer(invocationOnMock -> new WorkingCopy<>(null, null, () -> {}, invocationOnMock.getArgument(0, File.class)));
lenient().when(workingCopyContext.reclaim(any()))
.thenAnswer(invocationOnMock -> new WorkingCopy<>(null, null, () -> {}, invocationOnMock.getArgument(0, File.class)));
}
@Test
void shouldCreateNewWorkdirForTheFirstRequest(@TempDir Path temp) {
when(workingCopyContext.getScmRepository()).thenReturn(REPOSITORY);
when(workdirProvider.createNewWorkdir(anyString())).thenReturn(temp.toFile());
@Nested
class WithCache {
WorkingCopy<?, ?> workdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
@BeforeEach
void initContext() {
simpleCachingWorkingCopyPool = new SimpleCachingWorkingCopyPool(2, workdirProvider, meterRegistry);
}
verify(workingCopyContext).initialize(temp.toFile());
@Test
void shouldCreateNewWorkdirForTheFirstRequest(@TempDir Path temp) {
when(workingCopyContext.getScmRepository()).thenReturn(REPOSITORY);
when(workdirProvider.createNewWorkdir(anyString())).thenReturn(temp.toFile());
WorkingCopy<?, ?> workdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
verify(workingCopyContext).initialize(temp.toFile());
assertThat(meterRegistry.get("scm.workingcopy.pool.cache.miss").counter().count()).isEqualTo(1d);
}
@Test
void shouldReuseWorkdirForTheSameRepository(@TempDir Path temp) throws ReclaimFailedException {
when(workingCopyContext.getScmRepository()).thenReturn(REPOSITORY);
when(workdirProvider.createNewWorkdir(anyString())).thenReturn(temp.toFile());
WorkingCopy<?, ?> firstWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
simpleCachingWorkingCopyPool.contextClosed(workingCopyContext, firstWorkdir.getDirectory());
WorkingCopy<?, ?> secondWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
verify(workingCopyContext).initialize(temp.toFile());
verify(workingCopyContext).reclaim(temp.toFile());
assertThat(secondWorkdir.getDirectory()).isEqualTo(temp.toFile());
assertThat(meterRegistry.get("scm.workingcopy.pool.cache.hit").counter().count()).isEqualTo(1d);
}
@Test
void shouldCreateNewWorkdirIfReclaimFails(@TempDir Path temp) throws ReclaimFailedException {
when(workingCopyContext.getScmRepository()).thenReturn(REPOSITORY);
when(workdirProvider.createNewWorkdir(anyString())).thenReturn(temp.resolve("1").toFile(), temp.resolve("2").toFile());
when(workingCopyContext.reclaim(any())).thenThrow(ReclaimFailedException.class);
WorkingCopy<?, ?> firstWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
simpleCachingWorkingCopyPool.contextClosed(workingCopyContext, firstWorkdir.getDirectory());
WorkingCopy<?, ?> secondWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
assertThat(secondWorkdir.getDirectory()).isNotEqualTo(temp.toFile());
assertThat(meterRegistry.get("scm.workingcopy.pool.reclaim.failure").counter().count()).isEqualTo(1d);
}
@Test
void shouldDeleteWorkdirIfCacheSizeReached(@TempDir Path temp) {
fillPool(temp, 3);
assertThat(temp.resolve("path-0")).doesNotExist();
assertThat(temp.resolve("path-1")).exists();
assertThat(temp.resolve("path-2")).exists();
assertThat(meterRegistry.get("scm.workingcopy.pool.cache.overflow").counter().count()).isEqualTo(1d);
}
@Test
void shouldReorderUsedWorkdirsInCache(@TempDir Path temp) {
fillPool(temp, 2);
queryAndCloseWorkdir(temp, 0); // querying first repository again should keep it from eviction
queryAndCloseWorkdir(temp, 2);
assertThat(temp.resolve("path-0")).exists();
assertThat(temp.resolve("path-1")).doesNotExist();
assertThat(temp.resolve("path-2")).exists();
}
}
@Test
void shouldCreateWorkdirOnlyOnceForTheSameRepository(@TempDir Path temp) throws SimpleWorkingCopyFactory.ReclaimFailedException {
when(workingCopyContext.getScmRepository()).thenReturn(REPOSITORY);
when(workdirProvider.createNewWorkdir(anyString())).thenReturn(temp.toFile());
@Nested
class WithoutCaching {
WorkingCopy<?, ?> firstWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
simpleCachingWorkingCopyPool.contextClosed(workingCopyContext, firstWorkdir.getDirectory());
WorkingCopy<?, ?> secondWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
@BeforeEach
void initContext() {
simpleCachingWorkingCopyPool = new SimpleCachingWorkingCopyPool(0, workdirProvider, meterRegistry);
}
verify(workingCopyContext).initialize(temp.toFile());
verify(workingCopyContext).reclaim(temp.toFile());
assertThat(secondWorkdir.getDirectory()).isEqualTo(temp.toFile());
@Test
void shouldNotCacheAnything(@TempDir Path temp) {
fillPool(temp, 2);
assertThat(temp.resolve("path-0")).doesNotExist();
assertThat(temp.resolve("path-1")).doesNotExist();
}
}
@Test
void shouldCacheOnlyOneWorkdirForRepository(@TempDir Path temp) throws SimpleWorkingCopyFactory.ReclaimFailedException {
when(workingCopyContext.getScmRepository()).thenReturn(REPOSITORY);
File firstDirectory = temp.resolve("first").toFile();
firstDirectory.mkdirs();
File secondDirectory = temp.resolve("second").toFile();
secondDirectory.mkdirs();
when(workdirProvider.createNewWorkdir(anyString())).thenReturn(
firstDirectory,
secondDirectory);
// Test helper: requests and immediately releases one working copy per index,
// so the pool cache is filled with `size` distinct repositories.
private void fillPool(Path temp, int size) {
for (int i = 0; i < size; ++i) {
queryAndCloseWorkdir(temp, i);
}
}
WorkingCopy<?, ?> firstWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
WorkingCopy<?, ?> secondWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
simpleCachingWorkingCopyPool.contextClosed(workingCopyContext, firstWorkdir.getDirectory());
simpleCachingWorkingCopyPool.contextClosed(workingCopyContext, secondWorkdir.getDirectory());
verify(workingCopyContext, never()).reclaim(any());
verify(workingCopyContext).initialize(firstDirectory);
verify(workingCopyContext).initialize(secondDirectory);
assertThat(firstWorkdir.getDirectory()).isNotEqualTo(secondWorkdir.getDirectory());
assertThat(firstWorkdir.getDirectory()).exists();
assertThat(secondWorkdir.getDirectory()).doesNotExist();
// Test helper: simulates a complete request/release cycle for a synthetic
// repository ("repo-<index>") whose working directory is temp/path-<index>.
private void queryAndCloseWorkdir(Path temp, int index) {
Repository repository = new Repository("repo-" + index, "git", "space", "X" + index);
when(workingCopyContext.getScmRepository()).thenReturn(repository);
String workdirName = "path-" + index;
lenient().doAnswer(invocation -> {
File newWorkdir = temp.resolve(workdirName).toFile();
newWorkdir.mkdirs();
return newWorkdir;
}).when(workdirProvider).createNewWorkdir(anyString());
WorkingCopy<Object, Path> workingCopy = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
simpleCachingWorkingCopyPool.contextClosed(workingCopyContext, workingCopy.getDirectory());
}
}

View File

@@ -25,6 +25,7 @@
package sonia.scm.repository.work;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.io.TempDir;
@@ -34,10 +35,15 @@ import sonia.scm.repository.RepositoryLocationResolver;
import sonia.scm.repository.RepositoryLocationResolver.RepositoryLocationResolverInstance;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.function.BiConsumer;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
@@ -52,7 +58,7 @@ class WorkdirProviderTest {
private RepositoryLocationResolverInstance<Path> repositoryLocationResolverInstance;
@BeforeEach
void initResolver(@TempDir Path temp) {
void initResolver() {
lenient().when(repositoryLocationResolver.forClass(Path.class)).thenReturn(repositoryLocationResolverInstance);
}
@@ -62,8 +68,9 @@ class WorkdirProviderTest {
File newWorkdir = provider.createNewWorkdir();
assertThat(newWorkdir).exists();
assertThat(newWorkdir).hasParent(temp.toFile());
assertThat(newWorkdir)
.exists()
.hasParent(temp.toFile());
verify(repositoryLocationResolverInstance, never()).getLocation(anyString());
}
@@ -86,8 +93,74 @@ class WorkdirProviderTest {
File newWorkdir = provider.createNewWorkdir("42");
assertThat(newWorkdir).exists();
assertThat(newWorkdir).hasParent(temp.toFile());
assertThat(newWorkdir)
.exists()
.hasParent(temp.toFile());
verify(repositoryLocationResolverInstance, never()).getLocation(anyString());
}
@Nested
class WithExistingGlobalWorkDir {
private Path globalRootDir;
private WorkdirProvider provider;
@BeforeEach
void createExistingWorkDir(@TempDir Path temp) throws IOException {
globalRootDir = temp.resolve("global");
Files.createDirectories(globalRootDir.resolve("global-temp"));
provider = new WorkdirProvider(globalRootDir.toFile(), repositoryLocationResolver, true);
}
@Test
void shouldDeleteOldGlobalWorkDirsOnStartup() {
provider.contextInitialized(null);
assertThat(globalRootDir).isEmptyDirectory();
}
@Test
void shouldDeleteOldGlobalWorkDirsOnShutdown() {
provider.contextDestroyed(null);
assertThat(globalRootDir).isEmptyDirectory();
}
}
@Nested
class WithExistingRepositoryWorkDir {
private Path repositoryRootDir;
private WorkdirProvider provider;
@BeforeEach
void createExistingWorkDir(@TempDir Path temp) throws IOException {
repositoryRootDir = temp.resolve("42");
Files.createDirectories(repositoryRootDir.resolve("work").resolve("repo-temp"));
doAnswer(
invocationOnMock -> {
invocationOnMock.getArgument(0, BiConsumer.class)
.accept("42", repositoryRootDir);
return null;
}
).when(repositoryLocationResolverInstance).forAllLocations(any());
provider = new WorkdirProvider(temp.resolve("global").toFile(), repositoryLocationResolver, true);
}
@Test
void shouldDeleteOldRepositoryRelatedWorkDirsOnStartup() {
provider.contextInitialized(null);
assertThat(repositoryRootDir.resolve("work")).isEmptyDirectory();
}
@Test
void shouldDeleteOldRepositoryRelatedWorkDirsOnShutdown() {
provider.contextInitialized(null);
assertThat(repositoryRootDir.resolve("work")).isEmptyDirectory();
}
}
}

View File

@@ -46,10 +46,10 @@ public class GitRepositoryConfigStoreProvider {
}
public GitRepositoryConfig getGitRepositoryConfig(String repositoryId) {
return getFronStore(createStore(repositoryId));
return getFromStore(createStore(repositoryId));
}
private static GitRepositoryConfig getFronStore(ConfigurationStore<GitRepositoryConfig> store) {
private static GitRepositoryConfig getFromStore(ConfigurationStore<GitRepositoryConfig> store) {
return store.getOptional().orElse(new GitRepositoryConfig());
}
@@ -72,7 +72,7 @@ public class GitRepositoryConfigStoreProvider {
@Override
public GitRepositoryConfig get() {
return getFronStore(delegate);
return getFromStore(delegate);
}
@Override

View File

@@ -44,21 +44,22 @@ class GitWorkingCopyReclaimer {
private final GitContext context;
public GitWorkingCopyReclaimer(GitContext context) {
GitWorkingCopyReclaimer(GitContext context) {
this.context = context;
}
public ParentAndClone<Repository, Repository> reclaim(File target, String initialBranch) throws SimpleWorkingCopyFactory.ReclaimFailedException {
LOG.trace("reclaim repository {}", context.getRepository());
String branchToCheckout = determineBranch(initialBranch);
Stopwatch stopwatch = Stopwatch.createStarted();
Repository repo = openTarget(target);
try (Git git = Git.open(target)) {
git.reset().setMode(ResetCommand.ResetType.HARD).call();
git.clean().setForce(true).setCleanDirectories(true).call();
git.fetch().call();
git.checkout().setForced(true).setName("origin/" + initialBranch).call();
git.branchDelete().setBranchNames(initialBranch).setForce(true).call();
git.checkout().setName(initialBranch).setCreateBranch(true).call();
git.checkout().setForced(true).setName("origin/" + branchToCheckout).call();
git.branchDelete().setBranchNames(branchToCheckout).setForce(true).call();
git.checkout().setName(branchToCheckout).setCreateBranch(true).call();
return new ParentAndClone<>(null, repo, target);
} catch (GitAPIException | IOException e) {
throw new SimpleWorkingCopyFactory.ReclaimFailedException(e);
@@ -67,6 +68,16 @@ class GitWorkingCopyReclaimer {
}
}
/**
 * Determines the branch to check out, falling back in this order:
 * explicitly requested branch, repository-specific configured default branch,
 * globally configured default branch.
 */
private String determineBranch(String initialBranch) {
if (initialBranch != null) {
return initialBranch;
}
if (context.getConfig().getDefaultBranch() != null) {
return context.getConfig().getDefaultBranch();
}
return context.getGlobalConfig().getDefaultBranch();
}
private Repository openTarget(File target) throws SimpleWorkingCopyFactory.ReclaimFailedException {
try {
return GitUtil.open(target);

View File

@@ -70,7 +70,7 @@ public class SimpleGitWorkingCopyFactory extends SimpleWorkingCopyFactory<Reposi
}
@Override
protected void closeWorkingCopy(Repository workingCopy) throws Exception {
protected void closeWorkingCopy(Repository workingCopy) {
if (workingCopy != null) {
workingCopy.close();
}

View File

@@ -35,6 +35,7 @@ import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import sonia.scm.repository.GitRepositoryConfig;
import sonia.scm.repository.GitRepositoryHandler;
import sonia.scm.repository.GitTestHelper;
import sonia.scm.repository.PreProcessorUtil;
@@ -149,6 +150,32 @@ public class SimpleGitWorkingCopyFactoryTest extends AbstractGitCommandTestBase
assertBranchCheckedOutAndClean(workdir, "master");
}
@Test
public void shouldReclaimCleanDirectoryConfiguredDefaultBranch() throws Exception {
SimpleGitWorkingCopyFactory factory = new SimpleGitWorkingCopyFactory(new NoneCachingWorkingCopyPool(workdirProvider), new SimpleMeterRegistry());
File workdir = createExistingClone(factory);
GitContext context = createContext();
GitRepositoryConfig config = context.getConfig();
config.setDefaultBranch("master");
context.setConfig(config);
factory.reclaim(context, workdir, null);
assertBranchCheckedOutAndClean(workdir, "master");
}
@Test
public void shouldReclaimCleanDirectoryGloballyConfiguredDefaultBranch() throws Exception {
  // without an explicit branch and without a repository-specific default,
  // reclaim has to fall back to the globally configured default branch
  SimpleGitWorkingCopyFactory workingCopyFactory = new SimpleGitWorkingCopyFactory(new NoneCachingWorkingCopyPool(workdirProvider), new SimpleMeterRegistry());
  File existingClone = createExistingClone(workingCopyFactory);
  GitContext context = createContext();
  context.getGlobalConfig().setDefaultBranch("master");

  workingCopyFactory.reclaim(context, existingClone, null);

  assertBranchCheckedOutAndClean(existingClone, "master");
}
@Test
public void shouldReclaimCleanDirectoryWithOtherBranch() throws Exception {
SimpleGitWorkingCopyFactory factory = new SimpleGitWorkingCopyFactory(new NoneCachingWorkingCopyPool(workdirProvider), new SimpleMeterRegistry());
@@ -192,6 +219,7 @@ public class SimpleGitWorkingCopyFactoryTest extends AbstractGitCommandTestBase
factory.reclaim(createContext(), workdir, "master");
assertBranchCheckedOutAndClean(workdir, "master");
assertThat(newDirectory).doesNotExist();
}
public File createExistingClone(SimpleGitWorkingCopyFactory factory) throws Exception {

View File

@@ -25,6 +25,7 @@
package sonia.scm.repository;
import com.aragost.javahg.RepositoryConfiguration;
import com.aragost.javahg.ext.purge.PurgeExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import sonia.scm.repository.hooks.HookEnvironment;
@@ -69,6 +70,7 @@ public class HgRepositoryFactory {
RepositoryConfiguration repoConfiguration = RepositoryConfiguration.DEFAULT;
repoConfiguration.getEnvironment().putAll(environment);
repoConfiguration.addExtension(HgFileviewExtension.class);
repoConfiguration.addExtension(PurgeExtension.class);
boolean pending = hookEnvironment.isPending();
repoConfiguration.setEnablePendingChangesets(pending);

View File

@@ -32,6 +32,7 @@ import com.aragost.javahg.commands.PullCommand;
import com.aragost.javahg.commands.StatusCommand;
import com.aragost.javahg.commands.UpdateCommand;
import com.aragost.javahg.commands.flags.CloneCommandFlags;
import com.aragost.javahg.ext.purge.PurgeCommand;
import io.micrometer.core.instrument.MeterRegistry;
import sonia.scm.repository.HgExtensions;
import sonia.scm.repository.InternalRepositoryException;
@@ -78,7 +79,9 @@ public class SimpleHgWorkingCopyFactory extends SimpleWorkingCopyFactory<Reposit
for (String unknown : StatusCommand.on(clone).execute().getUnknown()) {
delete(clone.getDirectory(), unknown);
}
UpdateCommand.on(clone).rev(initialBranch).clean().execute();
String branchToCheckOut = initialBranch == null ? "default" : initialBranch;
UpdateCommand.on(clone).rev(branchToCheckOut).clean().execute();
PurgeCommand.on(clone).execute();
return new ParentAndClone<>(centralRepository, clone, target);
} catch (ExecutionException | IOException e) {
throw new ReclaimFailedException(e);

View File

@@ -24,12 +24,19 @@
package sonia.scm.repository.spi;
import com.aragost.javahg.BaseRepository;
import com.aragost.javahg.Repository;
import com.aragost.javahg.commands.BranchCommand;
import com.aragost.javahg.commands.RemoveCommand;
import com.aragost.javahg.commands.StatusCommand;
import com.aragost.javahg.commands.results.StatusResult;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import sonia.scm.repository.work.NoneCachingWorkingCopyPool;
import sonia.scm.repository.work.SimpleCachingWorkingCopyPool;
import sonia.scm.repository.work.WorkdirProvider;
import sonia.scm.repository.work.WorkingCopy;
@@ -38,6 +45,7 @@ import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collections;
import static org.assertj.core.api.Assertions.assertThat;
@@ -46,6 +54,7 @@ public class SimpleHgWorkingCopyFactoryTest extends AbstractHgCommandTestBase {
@Rule
public TemporaryFolder temporaryFolder = new TemporaryFolder();
private MeterRegistry meterRegistry = new SimpleMeterRegistry();
private WorkdirProvider workdirProvider;
@@ -54,7 +63,7 @@ public class SimpleHgWorkingCopyFactoryTest extends AbstractHgCommandTestBase {
@Before
public void bindScmProtocol() throws IOException {
workdirProvider = new WorkdirProvider(temporaryFolder.newFolder(), repositoryLocationResolver, false);
workingCopyFactory = new SimpleHgWorkingCopyFactory(new SimpleCachingWorkingCopyPool(workdirProvider), new SimpleMeterRegistry()) {
workingCopyFactory = new SimpleHgWorkingCopyFactory(new SimpleCachingWorkingCopyPool(workdirProvider, meterRegistry), new SimpleMeterRegistry()) {
@Override
public void configure(com.aragost.javahg.commands.PullCommand pullCommand) {
// we do not want to configure http hooks in this unit test
@@ -134,6 +143,95 @@ public class SimpleHgWorkingCopyFactoryTest extends AbstractHgCommandTestBase {
WorkingCopy<Repository, Repository> cachedWorkingCopy = workingCopyFactory.createWorkingCopy(cmdContext, "default");
assertThat(cachedWorkingCopy.getDirectory()).isEqualTo(initialDirectory);
assertThat(cachedWorkingCopy.getDirectory().toPath().resolve("newDir")).isEmptyDirectory();
assertThat(cachedWorkingCopy.getDirectory().toPath().resolve("newDir")).doesNotExist();
}
@Test
public void shouldReclaimCleanDirectoryWithSameBranch() throws Exception {
  // reclaiming a clean clone on the branch that is already checked out
  // must leave the working copy clean on that branch
  SimpleHgWorkingCopyFactory workingCopyFactory = new SimpleHgWorkingCopyFactory(new NoneCachingWorkingCopyPool(workdirProvider), new SimpleMeterRegistry());
  File existingClone = createExistingClone(workingCopyFactory);

  workingCopyFactory.reclaim(cmdContext, existingClone, "default");

  assertBranchCheckedOutAndClean(existingClone, "default");
}
@Test
public void shouldReclaimCleanDirectoryWithDefaultBranch() throws Exception {
  // a reclaim without an explicit branch must end up on mercurial's
  // "default" branch
  SimpleHgWorkingCopyFactory workingCopyFactory = new SimpleHgWorkingCopyFactory(new NoneCachingWorkingCopyPool(workdirProvider), new SimpleMeterRegistry());
  File existingClone = createExistingClone(workingCopyFactory);

  workingCopyFactory.reclaim(cmdContext, existingClone, null);

  assertBranchCheckedOutAndClean(existingClone, "default");
}
@Test
public void shouldReclaimCleanDirectoryWithOtherBranch() throws Exception {
  // reclaim has to switch the working copy to the requested branch even
  // though the existing clone has a different one checked out
  SimpleHgWorkingCopyFactory workingCopyFactory = new SimpleHgWorkingCopyFactory(new NoneCachingWorkingCopyPool(workdirProvider), new SimpleMeterRegistry());
  File existingClone = createExistingClone(workingCopyFactory);

  workingCopyFactory.reclaim(cmdContext, existingClone, "test-branch");

  assertBranchCheckedOutAndClean(existingClone, "test-branch");
}
@Test
public void shouldReclaimDirectoryWithDeletedFileInIndex() throws Exception {
  // a file removal staged in the existing clone must be rolled back by reclaim
  SimpleHgWorkingCopyFactory workingCopyFactory = new SimpleHgWorkingCopyFactory(new NoneCachingWorkingCopyPool(workdirProvider), new SimpleMeterRegistry());
  File existingClone = createExistingClone(workingCopyFactory);
  RemoveCommand.on(Repository.open(existingClone)).execute("a.txt");

  workingCopyFactory.reclaim(cmdContext, existingClone, "default");

  assertBranchCheckedOutAndClean(existingClone, "default");
}
@Test
public void shouldReclaimDirectoryWithDeletedFileInDirectory() throws Exception {
  // removing a whole directory in the existing clone must be rolled back by reclaim
  SimpleHgWorkingCopyFactory workingCopyFactory = new SimpleHgWorkingCopyFactory(new NoneCachingWorkingCopyPool(workdirProvider), new SimpleMeterRegistry());
  File existingClone = createExistingClone(workingCopyFactory);
  RemoveCommand.on(Repository.open(existingClone)).execute("c");

  workingCopyFactory.reclaim(cmdContext, existingClone, "default");

  assertBranchCheckedOutAndClean(existingClone, "default");
}
@Test
public void shouldReclaimDirectoryWithAdditionalFileInDirectory() throws Exception {
  // untracked files and directories left behind in the clone must be
  // purged by reclaim
  SimpleHgWorkingCopyFactory workingCopyFactory = new SimpleHgWorkingCopyFactory(new NoneCachingWorkingCopyPool(workdirProvider), new SimpleMeterRegistry());
  File existingClone = createExistingClone(workingCopyFactory);
  Path untrackedDirectory = existingClone.toPath().resolve("new");
  Files.createDirectories(untrackedDirectory);
  Files.createFile(untrackedDirectory.resolve("newFile"));

  workingCopyFactory.reclaim(cmdContext, existingClone, "default");

  assertBranchCheckedOutAndClean(existingClone, "default");
  assertThat(untrackedDirectory).doesNotExist();
}
/**
 * Verifies that the working directory contains no pending changes of any
 * kind and that the expected branch is currently checked out.
 */
private void assertBranchCheckedOutAndClean(File workdir, String expectedBranch) {
  BaseRepository reclaimedRepository = Repository.open(workdir);
  StatusResult status = StatusCommand.on(reclaimedRepository).execute();
  // every status category has to be empty for a clean working copy
  assertThat(status.getAdded()).isEmpty();
  assertThat(status.getCopied()).isEmpty();
  assertThat(status.getIgnored()).isEmpty();
  assertThat(status.getMissing()).isEmpty();
  assertThat(status.getModified()).isEmpty();
  assertThat(status.getRemoved()).isEmpty();
  assertThat(status.getUnknown()).isEmpty();
  assertThat(BranchCommand.on(reclaimedRepository).get()).isEqualTo(expectedBranch);
}
/**
 * Unpacks a prepared mercurial clone into a fresh temporary folder and
 * points its default path at the central test repository.
 */
public File createExistingClone(SimpleHgWorkingCopyFactory factory) throws Exception {
  File clone = temporaryFolder.newFolder();
  extract(clone, "sonia/scm/repository/spi/scm-hg-spi-workdir-test.zip");
  Path hgrc = clone.toPath().resolve(".hg").resolve("hgrc");
  Files.write(hgrc, Arrays.asList("[paths]", "default = " + repositoryDirectory.getAbsolutePath()));
  return clone;
}
}

View File

@@ -24,6 +24,7 @@
package sonia.scm.repository.spi;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import org.junit.Before;
import org.junit.Rule;
@@ -44,17 +45,19 @@ public class SimpleSvnWorkingCopyFactoryTest extends AbstractSvnCommandTestBase
@Rule
public TemporaryFolder temporaryFolder = new TemporaryFolder();
private MeterRegistry meterRegistry = new SimpleMeterRegistry();
// keep this so that it will not be garbage collected (Transport keeps this in a week reference)
private WorkdirProvider workdirProvider;
@Before
public void initWorkDirProvider() throws IOException {
workdirProvider = new WorkdirProvider(temporaryFolder.newFolder(), repositoryLocationResolver, false);
}
@Test
public void shouldCheckoutLatestRevision() throws SVNException, IOException {
public void shouldCheckoutLatestRevision() {
SimpleSvnWorkingCopyFactory factory = new SimpleSvnWorkingCopyFactory(new NoneCachingWorkingCopyPool(workdirProvider), new SimpleMeterRegistry());
try (WorkingCopy<File, File> workingCopy = factory.createWorkingCopy(createContext(), null)) {
@@ -96,7 +99,7 @@ public class SimpleSvnWorkingCopyFactoryTest extends AbstractSvnCommandTestBase
@Test
public void shouldDeleteUntrackedFileOnReclaim() throws IOException {
SimpleSvnWorkingCopyFactory factory = new SimpleSvnWorkingCopyFactory(new SimpleCachingWorkingCopyPool(workdirProvider), new SimpleMeterRegistry());
SimpleSvnWorkingCopyFactory factory = new SimpleSvnWorkingCopyFactory(new SimpleCachingWorkingCopyPool(workdirProvider, meterRegistry), new SimpleMeterRegistry());
WorkingCopy<File, File> workingCopy = factory.createWorkingCopy(createContext(), null);
File directory = workingCopy.getWorkingRepository();
@@ -113,8 +116,8 @@ public class SimpleSvnWorkingCopyFactoryTest extends AbstractSvnCommandTestBase
}
@Test
public void shouldRestoreDeletedFileOnReclaim() throws IOException {
SimpleSvnWorkingCopyFactory factory = new SimpleSvnWorkingCopyFactory(new SimpleCachingWorkingCopyPool(workdirProvider), new SimpleMeterRegistry());
public void shouldRestoreDeletedFileOnReclaim() {
SimpleSvnWorkingCopyFactory factory = new SimpleSvnWorkingCopyFactory(new SimpleCachingWorkingCopyPool(workdirProvider, meterRegistry), new SimpleMeterRegistry());
WorkingCopy<File, File> workingCopy = factory.createWorkingCopy(createContext(), null);
File directory = workingCopy.getWorkingRepository();