moved name hashing to streams
parent 9b3b908d0a
commit ed9c9a16fc
@@ -24,7 +24,6 @@ import net.szum123321.textile_backup.TextileLogger;
-import net.szum123321.textile_backup.core.CompressionStatus;
 
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -46,14 +45,9 @@ public class FileTreeHashBuilder {
 
         long size = Files.size(path);
 
-        var hasher = Globals.CHECKSUM_SUPPLIER.get();
-
-        hasher.update(path.getFileName().toString().getBytes(StandardCharsets.UTF_8));
-        hasher.update(newHash);
-
         synchronized (lock) {
             //This way, the exact order of files processed doesn't matter.
-            this.hash ^= hasher.getValue();
+            this.hash ^= newHash;
             filesProcessed++;
             filesTotalSize += size;
         }
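Note: the comment retained in this hunk carries the invariant that makes the simplification safe. XOR is commutative and associative, so folding finished per-file hashes into the tree hash gives the same result no matter which worker thread finishes first. A minimal standalone sketch of that property (illustrative, not the mod's code):

    import java.util.List;

    public class XorOrderDemo {
        public static void main(String[] args) {
            List<Long> perFileHashes = List.of(0x1234L, 0xBEEFL, 0xCAFEL);

            long forward = 0, backward = 0;
            for (long h : perFileHashes) forward ^= h; // one completion order
            for (int i = perFileHashes.size() - 1; i >= 0; i--)
                backward ^= perFileHashes.get(i);      // the reverse order

            System.out.println(forward == backward);   // true: order-independent
        }
    }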
@@ -24,6 +24,7 @@ import net.szum123321.textile_backup.core.create.BrokenFileHandler;
 import org.jetbrains.annotations.NotNull;
 
 import java.io.*;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Path;
 import java.util.concurrent.CountDownLatch;
 
@@ -31,7 +32,7 @@ import java.util.concurrent.CountDownLatch;
 * This class calculates a hash of the file on the input stream, submits it to FileTreeHashBuilder.
 * In case the underlying stream hasn't been read completely in, puts it into BrokeFileHandler
 
-* Futhermore, ParallelZip works by putting al the file requests into a queue and then compressing them
+* Furthermore, ParallelZip works by putting all the file requests into a queue and then compressing them
 * with multiple threads. Thus, we have to make sure that all the files have been read before requesting the final value
 * That is what CountDownLatch does
 */
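Note: the Javadoc above describes a latch handshake: every file stream counts down once it has been fully read, and the thread that wants the final tree hash blocks until the count reaches zero. A hedged sketch of that pattern, with illustrative names rather than the mod's actual API:

    import java.util.concurrent.CountDownLatch;

    class TreeHashGate {
        private final CountDownLatch latch;

        TreeHashGate(int fileCount) {
            this.latch = new CountDownLatch(fileCount);
        }

        // Called from each hashing stream's close(), one count per file.
        void fileDone() {
            latch.countDown();
        }

        // Called by the thread requesting the final value; blocks until every
        // file has been read, so ParallelZip's worker order cannot matter.
        void awaitAllFiles() throws InterruptedException {
            latch.await();
        }
    }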
@@ -60,7 +61,7 @@ public class HashingInputStream extends FilterInputStream {
     @Override
     public int read() throws IOException {
         int i = in.read();
-        if(i != -1) hasher.update(i);
+        if(i != -1) hasher.update((byte)i);
         return i;
     }
 
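Note: the cast matters if the checksum type overloads update for several widths, which this fix suggests. An int argument widens to a long overload (Java widens int to long but never implicitly narrows to byte), so without the cast eight bytes would be hashed instead of one. A self-contained demonstration of that overload resolution, with assumed signatures:

    class OverloadDemo {
        static void update(long v) { System.out.println("update(long): hashes 8 bytes"); }
        static void update(byte v) { System.out.println("update(byte): hashes 1 byte"); }

        public static void main(String[] args) {
            int i = 0x41;
            update(i);        // int widens to long: the wrong overload
            update((byte) i); // explicit cast selects the single-byte overload
        }
    }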
@@ -71,6 +72,8 @@ public class HashingInputStream extends FilterInputStream {
 
     @Override
     public void close() throws IOException {
+        hasher.update(path.getFileName().toString().getBytes(StandardCharsets.UTF_8));
+
         latch.countDown();
 
         if(in.available() == 0) hashBuilder.update(path, hasher.getValue());
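Note: this is the commit's core move. The file name is now folded into the per-file hash inside close(), before getValue() is taken, so FileTreeHashBuilder can XOR the raw per-file values and a renamed file still changes the tree hash. A sketch of the resulting per-file order, using java.util.zip.CRC32 as a stand-in for Globals.CHECKSUM_SUPPLIER:

    import java.nio.charset.StandardCharsets;
    import java.util.zip.CRC32;

    class PerFileHashDemo {
        static long hashFile(String name, byte[] contents) {
            CRC32 hasher = new CRC32();
            hasher.update(contents);                              // stream bytes, as they are read
            hasher.update(name.getBytes(StandardCharsets.UTF_8)); // name mixed in at close()
            return hasher.getValue();
        }

        public static void main(String[] args) {
            byte[] data = "same bytes".getBytes(StandardCharsets.UTF_8);
            // Same contents under a different name now produce a different per-file hash.
            System.out.println(hashFile("a.txt", data) != hashFile("b.txt", data)); // true
        }
    }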
@@ -24,6 +24,7 @@ import org.jetbrains.annotations.NotNull;
 import java.io.FilterOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Path;
 
 public class HashingOutputStream extends FilterOutputStream {
@@ -39,21 +40,21 @@ public class HashingOutputStream extends FilterOutputStream {
 
     @Override
     public void write(int b) throws IOException {
-        hasher.update(b);
         out.write(b);
+        hasher.update((byte)b);
     }
 
     @Override
     public void write(byte @NotNull [] b, int off, int len) throws IOException {
-        hasher.update(b, off, len);
         out.write(b, off, len);
+        hasher.update(b, off, len);
     }
 
     @Override
     public void close() throws IOException {
+        hasher.update(path.getFileName().toString().getBytes(StandardCharsets.UTF_8));
         long h = hasher.getValue();
         hashBuilder.update(path, h);
         super.close();
-
     }
 }
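Note: both write methods now update the hasher only after the bytes have been handed to the underlying stream, so a write that throws leaves the hash untouched; the commit doesn't state its motivation, so this is one plausible reading. A usage sketch mirroring that order, with CRC32 standing in for the mod's checksum and a simplified constructor:

    import java.io.ByteArrayOutputStream;
    import java.io.FilterOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.util.zip.CRC32;

    class CrcTapStream extends FilterOutputStream {
        private final CRC32 hasher = new CRC32();

        CrcTapStream(OutputStream out) { super(out); }

        @Override
        public void write(int b) throws IOException {
            out.write(b);     // write first: if this throws, the hash is not advanced
            hasher.update(b); // CRC32.update(int) consumes the low 8 bits
        }

        long value() { return hasher.getValue(); }

        public static void main(String[] args) throws IOException {
            try (CrcTapStream s = new CrcTapStream(new ByteArrayOutputStream())) {
                s.write('h');
                s.write('i');
                System.out.printf("crc=%08x%n", s.value());
            }
        }
    }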
@@ -34,6 +34,7 @@ import net.szum123321.textile_backup.core.restore.decompressors.ZipDecompressor;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.util.Optional;
 import java.util.concurrent.FutureTask;
 
 /**
@@ -63,7 +64,8 @@ public class RestoreBackupRunnable implements Runnable {
         try {
             tmp = Files.createTempDirectory(
                     ctx.server().getRunDirectory().toPath(),
-                    ctx.restoreableFile().getFile().getFileName().toString());
+                    ctx.restoreableFile().getFile().getFileName().toString()
+            );
         } catch (IOException e) {
             log.error("An exception occurred while unpacking backup", e);
             return;
@@ -72,7 +74,8 @@ public class RestoreBackupRunnable implements Runnable {
         //By making a separate thread we can start unpacking an old backup instantly
         //Let the server shut down gracefully, and wait for the old world backup to complete
         FutureTask<Void> waitForShutdown = new FutureTask<>(() -> {
-            ctx.server().getThread().join(); //wait for server to die and save all its state
+            ctx.server().getThread().join(); //wait for server thread to die and save all its state
+
             if(config.get().backupOldWorlds) {
                 return MakeBackupRunnableFactory.create (
                         BackupContext.Builder
@@ -84,6 +87,7 @@ public class RestoreBackupRunnable implements Runnable {
                                 .build()
                 ).call();
             }
+
             return null;
         });
 
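Note: a condensed sketch of the FutureTask pattern used above (names illustrative): the restore thread starts the task immediately, unpacks the backup concurrently, and later blocks on get() until the server thread has died and the optional old-world backup has completed:

    import java.util.concurrent.FutureTask;

    class ShutdownWaitDemo {
        public static void main(String[] args) throws Exception {
            Thread serverThread = new Thread(() -> { /* server runs, then exits */ });
            serverThread.start();

            FutureTask<Void> waitForShutdown = new FutureTask<>(() -> {
                serverThread.join(); // wait for the server thread to die
                /* optionally back up the old world here */
                return null;
            });
            new Thread(waitForShutdown).start();

            /* ...unpack the restored backup concurrently... */

            waitForShutdown.get(); // blocks until shutdown and the old-world backup finish
        }
    }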
@@ -99,21 +103,28 @@ public class RestoreBackupRunnable implements Runnable {
             else
                 hash = GenericTarDecompressor.decompress(ctx.restoreableFile().getFile(), tmp);
 
-            CompressionStatus status = CompressionStatus.readFromFile(tmp);
-            Files.delete(tmp.resolve(CompressionStatus.DATA_FILENAME));
-
             log.info("Waiting for server to fully terminate...");
 
             //locks until the backup is finished
             waitForShutdown.get();
 
-            var state = status.isValid(hash, ctx);
-
-            if(state.isEmpty() || !config.get().errorErrorHandlingMode.verify()) {
-                if (state.isEmpty()) log.info("Backup valid. Restoring");
-                else log.info("Backup is damaged, but verification is disabled [{}]. Restoring", state.get());
+            Optional<String> errorMsg;
+
+            if(Files.notExists(CompressionStatus.resolveStatusFilename(tmp))) {
+                errorMsg = Optional.of("Status file not found!");
+            } else {
+                CompressionStatus status = CompressionStatus.readFromFile(tmp);
+
+                log.info("Status: {}", status);
+
+                Files.delete(tmp.resolve(CompressionStatus.DATA_FILENAME));
+
+                errorMsg = status.validate(hash, ctx);
+            }
+
+            if(errorMsg.isEmpty() || !config.get().integrityVerificationMode.verify()) {
+                if (errorMsg.isEmpty()) log.info("Backup valid. Restoring");
+                else log.info("Backup is damaged, but verification is disabled [{}]. Restoring", errorMsg.get());
 
                 Utilities.deleteDirectory(worldFile);
                 Files.move(tmp, worldFile);
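Note: a simplified sketch of the new control flow (validate and the verification toggle appear in the diff; the surrounding types are stand-ins): an empty Optional means the backup verified cleanly, a present value carries the error message, and a config flag can let a damaged backup through anyway:

    import java.util.Optional;

    class ValidationFlowDemo {
        static Optional<String> validate(boolean statusFileExists, boolean hashesMatch) {
            if (!statusFileExists) return Optional.of("Status file not found!");
            return hashesMatch ? Optional.empty() : Optional.of("hash mismatch");
        }

        public static void main(String[] args) {
            Optional<String> errorMsg = validate(true, true);
            boolean verify = true; // e.g. config.get().integrityVerificationMode.verify()

            if (errorMsg.isEmpty() || !verify) {
                System.out.println(errorMsg.isEmpty()
                        ? "Backup valid. Restoring"
                        : "Backup is damaged, but verification is disabled ["
                          + errorMsg.get() + "]. Restoring");
            } else {
                System.out.println(errorMsg.get()); // abort the restore
            }
        }
    }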
@@ -123,7 +134,7 @@ public class RestoreBackupRunnable implements Runnable {
                     Files.delete(ctx.restoreableFile().getFile());
                 }
             } else {
-                log.error(state.get());
+                log.error(errorMsg.get());
             }
         } catch (Exception e) {
             log.error("An exception occurred while trying to restore a backup!", e);