LRU semantics for workdir cache (#1735)

Introduces a maximum size for the simple workdir cache. On cache overflow, workdirs are evicted using an LRU strategy.
Furthermore, parallel requests for the same repository will now block until the workdir is released.
René Pfeuffer
2021-07-28 07:54:37 +02:00
committed by GitHub
parent f2cc9f67ac
commit ad6000722d
17 changed files with 578 additions and 97 deletions
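
The pool introduced here is configured entirely through system properties (both names appear in the Javadoc below). As a rough illustration, not taken from this commit, the two knobs could be set programmatically before SCM-Manager boots; the equivalent JVM flags are shown as comments:

public class WorkingCopyPoolConfigSketch {
  public static void main(String[] args) {
    // equivalent to the JVM flag -Dscm.workingCopyPoolSize=10
    // (0 disables caching, see the Javadoc of SimpleCachingWorkingCopyPool)
    System.setProperty("scm.workingCopyPoolSize", "10");
    // equivalent to -Dscm.workingCopyPoolStrategy=sonia.scm.repository.work.SimpleCachingWorkingCopyPool,
    // which enables this pool implementation in the first place
    System.setProperty("scm.workingCopyPoolStrategy",
      "sonia.scm.repository.work.SimpleCachingWorkingCopyPool");
  }
}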

File: sonia/scm/repository/work/SimpleCachingWorkingCopyPool.java

@@ -24,35 +24,53 @@
package sonia.scm.repository.work;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import sonia.scm.util.IOUtil;
import javax.inject.Inject;
import java.io.File;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import static java.lang.Integer.getInteger;
import static java.util.Optional.empty;
import static java.util.Optional.of;
/**
 * This class is a simple implementation of the {@link WorkingCopyPool} to demonstrate
- * how caching can work in the simplest way. For the first time a {@link WorkingCopy} is
+ * how caching can work in an LRU style. The first time a {@link WorkingCopy} is
 * requested for a repository with {@link #getWorkingCopy(SimpleWorkingCopyFactory.WorkingCopyContext)},
 * this implementation fetches a new directory from the {@link WorkdirProvider}.
 * On {@link #contextClosed(SimpleWorkingCopyFactory.WorkingCopyContext, File)},
- * the directory is not deleted, but put into a map with the repository id as key.
+ * the directory is not deleted, but put into a cache with the repository id as key.
 * When a working copy is requested with {@link #getWorkingCopy(SimpleWorkingCopyFactory.WorkingCopyContext)}
 * for a repository with such an existing directory, it is taken from the map, reclaimed, and
 * returned as {@link WorkingCopy}.
 * If a working copy is requested for a repository while another one is already in use,
- * a second directory is requested from the {@link WorkdirProvider} for the second one.
- * If a context is closed with {@link #contextClosed(SimpleWorkingCopyFactory.WorkingCopyContext, File)}
- * and there already is a directory stored in the map for the repository,
- * the directory from the closed context simply is deleted.
+ * the process will wait until the other process has finished.
+ * The number of directories cached is limited. By default, directories are cached for
+ * {@value DEFAULT_WORKING_COPY_POOL_SIZE} repositories. This can be changed with the system
+ * property '{@value WORKING_COPY_POOL_SIZE_PROPERTY}' (if this is set to zero, no caching will
+ * take place; to cache the directories for each repository without eviction, simply set this to a
+ * high enough value).
 * <br>
- * In general, this implementation should speed up things a bit, but one has to take into
- * account, that there is no monitoring of diskspace. So you have to make sure, that
- * there is enough space for a clone of each repository in the working dir.
+ * The usage of this pool has to be enabled by setting the system property 'scm.workingCopyPoolStrategy'
+ * to 'sonia.scm.repository.work.SimpleCachingWorkingCopyPool'.
+ * <br>
+ * In general, this implementation should speed up modifications inside SCM-Manager performed by
+ * the editor plugin or the review plugin, but one has to take into
+ * account that the space needed for repositories is multiplied. So you have to make sure that
+ * there is enough space for clones of the repository.
* <br>
* Possible enhancements:
* <ul>
@@ -65,49 +83,142 @@ import java.util.concurrent.ConcurrentHashMap;
*/
public class SimpleCachingWorkingCopyPool implements WorkingCopyPool {
public static final int DEFAULT_WORKING_COPY_POOL_SIZE = 5;
public static final String WORKING_COPY_POOL_SIZE_PROPERTY = "scm.workingCopyPoolSize";
private static final Logger LOG = LoggerFactory.getLogger(SimpleCachingWorkingCopyPool.class);
- private final Map<String, File> workdirs = new ConcurrentHashMap<>();
  private final WorkdirProvider workdirProvider;
+ private final LinkedHashMap<String, File> workdirs;
+ private final Map<String, Lock> locks;
private final boolean cacheEnabled;
private final Counter cacheHitCounter;
private final Counter cacheMissCounter;
private final Counter reclaimFailureCounter;
private final Counter overflowCounter;
private final Timer parallelWaitTimer;
private final Timer reclaimTimer;
private final Timer initializeTimer;
private final Timer deleteTimer;
@Inject
- public SimpleCachingWorkingCopyPool(WorkdirProvider workdirProvider) {
+ public SimpleCachingWorkingCopyPool(WorkdirProvider workdirProvider, MeterRegistry meterRegistry) {
this(getInteger(WORKING_COPY_POOL_SIZE_PROPERTY, DEFAULT_WORKING_COPY_POOL_SIZE), workdirProvider, meterRegistry);
}
@VisibleForTesting
SimpleCachingWorkingCopyPool(int size, WorkdirProvider workdirProvider, MeterRegistry meterRegistry) {
this.workdirProvider = workdirProvider;
this.workdirs = new LruMap(size);
this.locks = new ConcurrentHashMap<>();
cacheEnabled = size > 0;
cacheHitCounter = Counter
.builder("scm.workingcopy.pool.cache.hit")
.description("The amount of cache hits for the working copy pool")
.register(meterRegistry);
cacheMissCounter = Counter
.builder("scm.workingcopy.pool.cache.miss")
.description("The amount of cache misses for the working copy pool")
.register(meterRegistry);
reclaimFailureCounter = Counter
.builder("scm.workingcopy.pool.reclaim.failure")
.description("The amount of failed reclaim processes from pool")
.register(meterRegistry);
overflowCounter = Counter
.builder("scm.workingcopy.pool.cache.overflow")
.description("The amount of discarded working copies from pool due to cache overflow")
.register(meterRegistry);
parallelWaitTimer = Timer
.builder("scm.workingcopy.pool.parallel")
.description("Duration of blocking waits for available working copies in pool")
.register(meterRegistry);
reclaimTimer = Timer
.builder("scm.workingcopy.pool.reclaim.duration")
.description("Duration of reclaiming existing working copies in pool")
.register(meterRegistry);
initializeTimer = Timer
.builder("scm.workingcopy.pool.initialize.duration")
.description("Duration of initialization of working copies in pool")
.register(meterRegistry);
deleteTimer = Timer
.builder("scm.workingcopy.pool.delete.duration")
.description("Duration of deletes of working copies from pool")
.register(meterRegistry);
}
@Override
- public <R, W> WorkingCopy<R, W> getWorkingCopy(SimpleWorkingCopyFactory<R, W, ?>.WorkingCopyContext workingCopyContext) {
+ public <R, W> WorkingCopy<R, W> getWorkingCopy(SimpleWorkingCopyFactory<R, W, ?>.WorkingCopyContext context) {
Lock lock = getLock(context);
parallelWaitTimer.record(lock::lock);
try {
return getWorkingCopyFromPoolOrCreate(context);
} catch (RuntimeException e) {
lock.unlock();
throw e;
}
}
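
Note the asymmetric locking in getWorkingCopy above: the per-repository lock is released in the catch block only on failure; on success it stays locked until contextClosed is called further down. A minimal self-contained sketch of this hand-off pattern (names are illustrative, not from the commit):

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

class LockHandOffSketch {
  private final Lock lock = new ReentrantLock(true); // fair, like getLock below

  Object acquire() {
    lock.lock(); // blocks parallel requests for the same resource
    try {
      return new Object(); // create or reclaim the working copy
    } catch (RuntimeException e) {
      lock.unlock(); // release immediately if acquisition fails
      throw e;
    }
  }

  void release() {
    lock.unlock(); // otherwise the lock is held until the context is closed
  }
}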
private <R, W> WorkingCopy<R, W> getWorkingCopyFromPoolOrCreate(SimpleWorkingCopyFactory<R, W, ?>.WorkingCopyContext workingCopyContext) {
String id = workingCopyContext.getScmRepository().getId();
-   File existingWorkdir = workdirs.remove(id);
+   File existingWorkdir;
+   synchronized (workdirs) {
+     existingWorkdir = workdirs.remove(id);
+   }
if (existingWorkdir != null) {
-     Stopwatch stopwatch = Stopwatch.createStarted();
-     try {
-       WorkingCopy<R, W> reclaimed = workingCopyContext.reclaim(existingWorkdir);
-       LOG.debug("reclaimed workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), existingWorkdir, stopwatch.stop());
-       return reclaimed;
-     } catch (SimpleWorkingCopyFactory.ReclaimFailedException e) {
-       LOG.debug("failed to reclaim workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), existingWorkdir, stopwatch.stop(), e);
-       deleteWorkdir(existingWorkdir);
+     Optional<WorkingCopy<R, W>> reclaimedWorkingCopy = tryToReclaim(workingCopyContext, existingWorkdir);
+     if (reclaimedWorkingCopy.isPresent()) {
+       cacheHitCounter.increment();
+       return reclaimedWorkingCopy.get();
+     }
+   } else {
+     cacheMissCounter.increment();
}
return createNewWorkingCopy(workingCopyContext);
}
private <R, W> Optional<WorkingCopy<R, W>> tryToReclaim(SimpleWorkingCopyFactory<R, W, ?>.WorkingCopyContext workingCopyContext, File existingWorkdir) {
return reclaimTimer.record(() -> {
Stopwatch stopwatch = Stopwatch.createStarted();
try {
WorkingCopy<R, W> reclaimed = workingCopyContext.reclaim(existingWorkdir);
LOG.debug("reclaimed workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), existingWorkdir, stopwatch.stop());
return of(reclaimed);
} catch (Exception e) {
LOG.debug("failed to reclaim workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), existingWorkdir, stopwatch.stop(), e);
deleteWorkdir(existingWorkdir);
reclaimFailureCounter.increment();
return empty();
}
});
}
private <R, W> WorkingCopy<R, W> createNewWorkingCopy(SimpleWorkingCopyFactory<R, W, ?>.WorkingCopyContext workingCopyContext) {
-   Stopwatch stopwatch = Stopwatch.createStarted();
-   File newWorkdir = workdirProvider.createNewWorkdir(workingCopyContext.getScmRepository().getId());
-   WorkingCopy<R, W> parentAndClone = workingCopyContext.initialize(newWorkdir);
-   LOG.debug("initialized new workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), newWorkdir, stopwatch.stop());
-   return parentAndClone;
+   return initializeTimer.record(() -> {
+     Stopwatch stopwatch = Stopwatch.createStarted();
+     File newWorkdir = workdirProvider.createNewWorkdir(workingCopyContext.getScmRepository().getId());
+     WorkingCopy<R, W> parentAndClone = workingCopyContext.initialize(newWorkdir);
+     LOG.debug("initialized new workdir for {} in path {} in {}", workingCopyContext.getScmRepository(), newWorkdir, stopwatch.stop());
+     return parentAndClone;
+   });
}
@Override
public void contextClosed(SimpleWorkingCopyFactory<?, ?, ?>.WorkingCopyContext workingCopyContext, File workdir) {
-   String id = workingCopyContext.getScmRepository().getId();
-   File putResult = workdirs.putIfAbsent(id, workdir);
-   if (putResult != null && putResult != workdir) {
+   try {
+     putWorkingCopyToCache(workingCopyContext, workdir);
+   } finally {
+     getLock(workingCopyContext).unlock();
+   }
}
private void putWorkingCopyToCache(SimpleWorkingCopyFactory<?, ?, ?>.WorkingCopyContext workingCopyContext, File workdir) {
if (!cacheEnabled) {
deleteWorkdir(workdir);
return;
}
synchronized (workdirs) {
workdirs.put(workingCopyContext.getScmRepository().getId(), workdir);
}
}
@@ -118,8 +229,33 @@ public class SimpleCachingWorkingCopyPool implements WorkingCopyPool {
}
private void deleteWorkdir(File workdir) {
LOG.debug("deleting old workdir {}", workdir);
if (workdir.exists()) {
-     IOUtil.deleteSilently(workdir);
+     deleteTimer.record(() -> IOUtil.deleteSilently(workdir));
}
}
private <R, W> Lock getLock(SimpleWorkingCopyFactory<R, W, ?>.WorkingCopyContext context) {
return locks.computeIfAbsent(context.getScmRepository().getId(), id -> new ReentrantLock(true));
}
@SuppressWarnings("java:S2160") // no need for equals here
private class LruMap extends LinkedHashMap<String, File> {
private final int maxSize;
public LruMap(int maxSize) {
super(maxSize);
this.maxSize = maxSize;
}
@Override
protected boolean removeEldestEntry(Map.Entry<String, File> eldest) {
if (size() > maxSize) {
overflowCounter.increment();
deleteWorkdir(eldest.getValue());
return true;
}
return false;
}
}
}
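
A detail worth spelling out: LruMap is built on an insertion-ordered LinkedHashMap (the single-argument constructor does not enable access order). LRU behavior still emerges because getWorkingCopyFromPoolOrCreate removes an entry on use and contextClosed puts it back, which re-inserts it at the tail. A minimal sketch of that effect with plain JDK types:

import java.util.LinkedHashMap;
import java.util.Map;

public class InsertionOrderLruSketch {
  public static void main(String[] args) {
    Map<String, String> cache = new LinkedHashMap<>(5); // insertion-ordered
    cache.put("repo-a", "workdir-a");
    cache.put("repo-b", "workdir-b");
    // remove + put (as the pool does on use and close) moves repo-a to the tail
    String workdir = cache.remove("repo-a");
    cache.put("repo-a", workdir);
    // the eldest entry is now repo-b, so it would be evicted first
    System.out.println(cache.keySet()); // prints [repo-b, repo-a]
  }
}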

File: sonia/scm/repository/work/WorkdirProvider.java

@@ -24,15 +24,25 @@
package sonia.scm.repository.work;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import sonia.scm.plugin.Extension;
import sonia.scm.repository.RepositoryLocationResolver;
import sonia.scm.util.IOUtil;
import javax.inject.Inject;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
-public class WorkdirProvider {
+@Extension
+public class WorkdirProvider implements ServletContextListener {
private static final Logger LOG = LoggerFactory.getLogger(WorkdirProvider.class);
private final File rootDirectory;
private final RepositoryLocationResolver repositoryLocationResolver;
@@ -58,21 +68,58 @@ public class WorkdirProvider {
public File createNewWorkdir(String repositoryId) {
if (useRepositorySpecificDir) {
-     return createWorkDir(repositoryLocationResolver.forClass(Path.class).getLocation(repositoryId).resolve("work").toFile());
+     Path repositoryLocation = repositoryLocationResolver.forClass(Path.class).getLocation(repositoryId);
+     File workDirectoryForRepositoryLocation = getWorkDirectoryForRepositoryLocation(repositoryLocation);
+     LOG.debug("creating work dir for repository {} in relative path {}", repositoryId, workDirectoryForRepositoryLocation);
+     return createWorkDir(workDirectoryForRepositoryLocation);
} else {
LOG.debug("creating work dir for repository {} in global path", repositoryId);
return createNewWorkdir();
}
}
private File getWorkDirectoryForRepositoryLocation(Path repositoryLocation) {
return repositoryLocation.resolve("work").toFile();
}
private File createWorkDir(File baseDirectory) {
// recreate the base directory in case it has been deleted (see https://github.com/scm-manager/scm-manager/issues/1493 for example)
if (!baseDirectory.exists() && !baseDirectory.mkdirs()) {
throw new WorkdirCreationException(baseDirectory.toString());
}
try {
-     return Files.createTempDirectory(baseDirectory.toPath(),"work-").toFile();
+     File newWorkDir = Files.createTempDirectory(baseDirectory.toPath(), "work-").toFile();
+     LOG.debug("created new work dir {}", newWorkDir);
+     return newWorkDir;
} catch (IOException e) {
throw new WorkdirCreationException(baseDirectory.toString(), e);
}
}
@Override
public void contextInitialized(ServletContextEvent sce) {
deleteWorkDirs();
}
@Override
public void contextDestroyed(ServletContextEvent sce) {
deleteWorkDirs();
}
private void deleteWorkDirs() {
deleteWorkDirs(rootDirectory);
repositoryLocationResolver.forClass(Path.class).forAllLocations(
(repo, repositoryLocation) -> deleteWorkDirs(getWorkDirectoryForRepositoryLocation(repositoryLocation))
);
}
private void deleteWorkDirs(File root) {
File[] workDirs = root.listFiles();
if (workDirs != null) {
LOG.info("deleting {} old work dirs in {}", workDirs.length, root);
Arrays.stream(workDirs)
.filter(File::isDirectory)
.forEach(IOUtil::deleteSilently);
}
}
}
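
WorkdirProvider now doubles as a ServletContextListener so that stale work dirs are wiped on both startup and shutdown. A stripped-down sketch of the create/cleanup cycle (plain JDK only; unlike IOUtil.deleteSilently, File::delete here removes only empty directories):

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;

public class WorkdirCleanupSketch {
  public static void main(String[] args) throws IOException {
    Path base = Files.createTempDirectory("workdir-base");
    // like createWorkDir: one fresh "work-" prefixed directory per request
    File workdir = Files.createTempDirectory(base, "work-").toFile();
    System.out.println("created " + workdir);
    // like deleteWorkDirs: drop every directory below the base on startup/shutdown
    File[] children = base.toFile().listFiles();
    if (children != null) {
      Arrays.stream(children)
        .filter(File::isDirectory)
        .forEach(File::delete);
    }
  }
}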

File: sonia/scm/repository/work/SimpleCachingWorkingCopyPoolTest.java

@@ -24,14 +24,17 @@
package sonia.scm.repository.work;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.io.TempDir;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import sonia.scm.repository.Repository;
import sonia.scm.repository.work.SimpleWorkingCopyFactory.ReclaimFailedException;
import java.io.File;
import java.nio.file.Path;
@@ -40,7 +43,6 @@ import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@@ -51,66 +53,121 @@ class SimpleCachingWorkingCopyPoolTest {
@Mock
WorkdirProvider workdirProvider;
- @InjectMocks
+ MeterRegistry meterRegistry = new SimpleMeterRegistry();
SimpleCachingWorkingCopyPool simpleCachingWorkingCopyPool;
@Mock
SimpleWorkingCopyFactory<Object, Path, ?>.WorkingCopyContext workingCopyContext;
@BeforeEach
- void initContext() throws SimpleWorkingCopyFactory.ReclaimFailedException {
+ void initContext() throws ReclaimFailedException {
lenient().when(workingCopyContext.initialize(any()))
.thenAnswer(invocationOnMock -> new WorkingCopy<>(null, null, () -> {}, invocationOnMock.getArgument(0, File.class)));
lenient().when(workingCopyContext.reclaim(any()))
.thenAnswer(invocationOnMock -> new WorkingCopy<>(null, null, () -> {}, invocationOnMock.getArgument(0, File.class)));
}
- @Test
- void shouldCreateNewWorkdirForTheFirstRequest(@TempDir Path temp) {
-   when(workingCopyContext.getScmRepository()).thenReturn(REPOSITORY);
-   when(workdirProvider.createNewWorkdir(anyString())).thenReturn(temp.toFile());
+ @Nested
+ class WithCache {
-   WorkingCopy<?, ?> workdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
+   @BeforeEach
+   void initContext() {
+     simpleCachingWorkingCopyPool = new SimpleCachingWorkingCopyPool(2, workdirProvider, meterRegistry);
+   }
-   verify(workingCopyContext).initialize(temp.toFile());
+   @Test
+   void shouldCreateNewWorkdirForTheFirstRequest(@TempDir Path temp) {
+     when(workingCopyContext.getScmRepository()).thenReturn(REPOSITORY);
+     when(workdirProvider.createNewWorkdir(anyString())).thenReturn(temp.toFile());
+     WorkingCopy<?, ?> workdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
+     verify(workingCopyContext).initialize(temp.toFile());
+     assertThat(meterRegistry.get("scm.workingcopy.pool.cache.miss").counter().count()).isEqualTo(1d);
+   }
@Test
void shouldReuseWorkdirForTheSameRepository(@TempDir Path temp) throws ReclaimFailedException {
when(workingCopyContext.getScmRepository()).thenReturn(REPOSITORY);
when(workdirProvider.createNewWorkdir(anyString())).thenReturn(temp.toFile());
WorkingCopy<?, ?> firstWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
simpleCachingWorkingCopyPool.contextClosed(workingCopyContext, firstWorkdir.getDirectory());
WorkingCopy<?, ?> secondWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
verify(workingCopyContext).initialize(temp.toFile());
verify(workingCopyContext).reclaim(temp.toFile());
assertThat(secondWorkdir.getDirectory()).isEqualTo(temp.toFile());
assertThat(meterRegistry.get("scm.workingcopy.pool.cache.hit").counter().count()).isEqualTo(1d);
}
@Test
void shouldCreateNewWorkdirIfReclaimFails(@TempDir Path temp) throws ReclaimFailedException {
when(workingCopyContext.getScmRepository()).thenReturn(REPOSITORY);
when(workdirProvider.createNewWorkdir(anyString())).thenReturn(temp.resolve("1").toFile(), temp.resolve("2").toFile());
when(workingCopyContext.reclaim(any())).thenThrow(ReclaimFailedException.class);
WorkingCopy<?, ?> firstWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
simpleCachingWorkingCopyPool.contextClosed(workingCopyContext, firstWorkdir.getDirectory());
WorkingCopy<?, ?> secondWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
assertThat(secondWorkdir.getDirectory()).isNotEqualTo(temp.toFile());
assertThat(meterRegistry.get("scm.workingcopy.pool.reclaim.failure").counter().count()).isEqualTo(1d);
}
@Test
void shouldDeleteWorkdirIfCacheSizeReached(@TempDir Path temp) {
fillPool(temp, 3);
assertThat(temp.resolve("path-0")).doesNotExist();
assertThat(temp.resolve("path-1")).exists();
assertThat(temp.resolve("path-2")).exists();
assertThat(meterRegistry.get("scm.workingcopy.pool.cache.overflow").counter().count()).isEqualTo(1d);
}
@Test
void shouldReorderUsedWorkdirsInCache(@TempDir Path temp) {
fillPool(temp, 2);
queryAndCloseWorkdir(temp, 0); // querying first repository again should keep it from eviction
queryAndCloseWorkdir(temp, 2);
assertThat(temp.resolve("path-0")).exists();
assertThat(temp.resolve("path-1")).doesNotExist();
assertThat(temp.resolve("path-2")).exists();
}
}
- @Test
- void shouldCreateWorkdirOnlyOnceForTheSameRepository(@TempDir Path temp) throws SimpleWorkingCopyFactory.ReclaimFailedException {
-   when(workingCopyContext.getScmRepository()).thenReturn(REPOSITORY);
-   when(workdirProvider.createNewWorkdir(anyString())).thenReturn(temp.toFile());
+ @Nested
+ class WithoutCaching {
-   WorkingCopy<?, ?> firstWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
-   simpleCachingWorkingCopyPool.contextClosed(workingCopyContext, firstWorkdir.getDirectory());
-   WorkingCopy<?, ?> secondWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
+   @BeforeEach
+   void initContext() {
+     simpleCachingWorkingCopyPool = new SimpleCachingWorkingCopyPool(0, workdirProvider, meterRegistry);
+   }
-   verify(workingCopyContext).initialize(temp.toFile());
-   verify(workingCopyContext).reclaim(temp.toFile());
-   assertThat(secondWorkdir.getDirectory()).isEqualTo(temp.toFile());
+   @Test
+   void shouldNotCacheAnything(@TempDir Path temp) {
+     fillPool(temp, 2);
+     assertThat(temp.resolve("path-0")).doesNotExist();
+     assertThat(temp.resolve("path-1")).doesNotExist();
+   }
+ }
- @Test
- void shouldCacheOnlyOneWorkdirForRepository(@TempDir Path temp) throws SimpleWorkingCopyFactory.ReclaimFailedException {
-   when(workingCopyContext.getScmRepository()).thenReturn(REPOSITORY);
-   File firstDirectory = temp.resolve("first").toFile();
-   firstDirectory.mkdirs();
-   File secondDirectory = temp.resolve("second").toFile();
-   secondDirectory.mkdirs();
-   when(workdirProvider.createNewWorkdir(anyString())).thenReturn(
-     firstDirectory,
-     secondDirectory);
+ private void fillPool(Path temp, int size) {
+   for (int i = 0; i < size; ++i) {
+     queryAndCloseWorkdir(temp, i);
+   }
+ }
-   WorkingCopy<?, ?> firstWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
-   WorkingCopy<?, ?> secondWorkdir = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
-   simpleCachingWorkingCopyPool.contextClosed(workingCopyContext, firstWorkdir.getDirectory());
-   simpleCachingWorkingCopyPool.contextClosed(workingCopyContext, secondWorkdir.getDirectory());
-   verify(workingCopyContext, never()).reclaim(any());
-   verify(workingCopyContext).initialize(firstDirectory);
-   verify(workingCopyContext).initialize(secondDirectory);
-   assertThat(firstWorkdir.getDirectory()).isNotEqualTo(secondWorkdir.getDirectory());
-   assertThat(firstWorkdir.getDirectory()).exists();
-   assertThat(secondWorkdir.getDirectory()).doesNotExist();
+ private void queryAndCloseWorkdir(Path temp, int index) {
+   Repository repository = new Repository("repo-" + index, "git", "space", "X" + index);
+   when(workingCopyContext.getScmRepository()).thenReturn(repository);
+   String workdirName = "path-" + index;
+   lenient().doAnswer(invocation -> {
+     File newWorkdir = temp.resolve(workdirName).toFile();
+     newWorkdir.mkdirs();
+     return newWorkdir;
+   }).when(workdirProvider).createNewWorkdir(anyString());
+   WorkingCopy<Object, Path> workingCopy = simpleCachingWorkingCopyPool.getWorkingCopy(workingCopyContext);
+   simpleCachingWorkingCopyPool.contextClosed(workingCopyContext, workingCopy.getDirectory());
+ }
}

File: sonia/scm/repository/work/WorkdirProviderTest.java

@@ -25,6 +25,7 @@
package sonia.scm.repository.work;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.io.TempDir;
@@ -34,10 +35,15 @@ import sonia.scm.repository.RepositoryLocationResolver;
import sonia.scm.repository.RepositoryLocationResolver.RepositoryLocationResolverInstance;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.function.BiConsumer;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
@@ -52,7 +58,7 @@ class WorkdirProviderTest {
private RepositoryLocationResolverInstance<Path> repositoryLocationResolverInstance;
@BeforeEach
- void initResolver(@TempDir Path temp) {
+ void initResolver() {
lenient().when(repositoryLocationResolver.forClass(Path.class)).thenReturn(repositoryLocationResolverInstance);
}
@@ -62,8 +68,9 @@ class WorkdirProviderTest {
File newWorkdir = provider.createNewWorkdir();
-   assertThat(newWorkdir).exists();
-   assertThat(newWorkdir).hasParent(temp.toFile());
+   assertThat(newWorkdir)
+     .exists()
+     .hasParent(temp.toFile());
verify(repositoryLocationResolverInstance, never()).getLocation(anyString());
}
@@ -86,8 +93,74 @@ class WorkdirProviderTest {
File newWorkdir = provider.createNewWorkdir("42");
-   assertThat(newWorkdir).exists();
-   assertThat(newWorkdir).hasParent(temp.toFile());
+   assertThat(newWorkdir)
+     .exists()
+     .hasParent(temp.toFile());
verify(repositoryLocationResolverInstance, never()).getLocation(anyString());
}
@Nested
class WithExistingGlobalWorkDir {
private Path globalRootDir;
private WorkdirProvider provider;
@BeforeEach
void createExistingWorkDir(@TempDir Path temp) throws IOException {
globalRootDir = temp.resolve("global");
Files.createDirectories(globalRootDir.resolve("global-temp"));
provider = new WorkdirProvider(globalRootDir.toFile(), repositoryLocationResolver, true);
}
@Test
void shouldDeleteOldGlobalWorkDirsOnStartup() {
provider.contextInitialized(null);
assertThat(globalRootDir).isEmptyDirectory();
}
@Test
void shouldDeleteOldGlobalWorkDirsOnShutdown() {
provider.contextDestroyed(null);
assertThat(globalRootDir).isEmptyDirectory();
}
}
@Nested
class WithExistingRepositoryWorkDir {
private Path repositoryRootDir;
private WorkdirProvider provider;
@BeforeEach
void createExistingWorkDir(@TempDir Path temp) throws IOException {
repositoryRootDir = temp.resolve("42");
Files.createDirectories(repositoryRootDir.resolve("work").resolve("repo-temp"));
doAnswer(
invocationOnMock -> {
invocationOnMock.getArgument(0, BiConsumer.class)
.accept("42", repositoryRootDir);
return null;
}
).when(repositoryLocationResolverInstance).forAllLocations(any());
provider = new WorkdirProvider(temp.resolve("global").toFile(), repositoryLocationResolver, true);
}
@Test
void shouldDeleteOldRepositoryRelatedWorkDirsOnStartup() {
provider.contextInitialized(null);
assertThat(repositoryRootDir.resolve("work")).isEmptyDirectory();
}
@Test
void shouldDeleteOldRepositoryRelatedWorkDirsOnShutdown() {
provider.contextDestroyed(null);
assertThat(repositoryRootDir.resolve("work")).isEmptyDirectory();
}
}
}