From ed9c9a16fc6b5f57d3b4709cb74f25b2f02457b9 Mon Sep 17 00:00:00 2001
From: Szum123321
Date: Tue, 20 Dec 2022 00:47:57 +0100
Subject: [PATCH] moved name hashing to streams

---
 .../core/digest/FileTreeHashBuilder.java      |  8 +----
 .../core/digest/HashingInputStream.java       |  7 ++--
 .../core/digest/HashingOutputStream.java      |  7 ++--
 .../core/restore/RestoreBackupRunnable.java   | 33 ++++++++++++-------
 4 files changed, 32 insertions(+), 23 deletions(-)

diff --git a/src/main/java/net/szum123321/textile_backup/core/digest/FileTreeHashBuilder.java b/src/main/java/net/szum123321/textile_backup/core/digest/FileTreeHashBuilder.java
index 8c1e136..4abfde0 100644
--- a/src/main/java/net/szum123321/textile_backup/core/digest/FileTreeHashBuilder.java
+++ b/src/main/java/net/szum123321/textile_backup/core/digest/FileTreeHashBuilder.java
@@ -24,7 +24,6 @@ import net.szum123321.textile_backup.TextileLogger;
 import net.szum123321.textile_backup.core.CompressionStatus;
 
 import java.io.IOException;
-import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -46,14 +45,9 @@ public class FileTreeHashBuilder {
 
         long size = Files.size(path);
 
-        var hasher = Globals.CHECKSUM_SUPPLIER.get();
-
-        hasher.update(path.getFileName().toString().getBytes(StandardCharsets.UTF_8));
-        hasher.update(newHash);
-
         synchronized (lock) {
             //This way, the exact order of files processed doesn't matter.
-            this.hash ^= hasher.getValue();
+            this.hash ^= newHash;
             filesProcessed++;
             filesTotalSize += size;
         }
diff --git a/src/main/java/net/szum123321/textile_backup/core/digest/HashingInputStream.java b/src/main/java/net/szum123321/textile_backup/core/digest/HashingInputStream.java
index 6a31343..ea88076 100644
--- a/src/main/java/net/szum123321/textile_backup/core/digest/HashingInputStream.java
+++ b/src/main/java/net/szum123321/textile_backup/core/digest/HashingInputStream.java
@@ -24,6 +24,7 @@ import net.szum123321.textile_backup.core.create.BrokenFileHandler;
 import org.jetbrains.annotations.NotNull;
 
 import java.io.*;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Path;
 import java.util.concurrent.CountDownLatch;
 
@@ -31,7 +32,7 @@ import java.util.concurrent.CountDownLatch;
  * This class calculates a hash of the file on the input stream, submits it to FileTreeHashBuilder.
  * In case the underlying stream hasn't been read completely in, puts it into BrokeFileHandler
- * Futhermore, ParallelZip works by putting al the file requests into a queue and then compressing them
+ * Furthermore, ParallelZip works by putting all the file requests into a queue and then compressing them
  * with multiple threads.
 Thus, we have to make sure that all the files have been read before requesting the final value
  * That is what CountDownLatch does
  */
@@ -60,7 +61,7 @@ public class HashingInputStream extends FilterInputStream {
     @Override
     public int read() throws IOException {
         int i = in.read();
-        if(i != -1) hasher.update(i);
+        if(i != -1) hasher.update((byte)i);
         return i;
     }
 
@@ -71,6 +72,8 @@ public class HashingInputStream extends FilterInputStream {
 
     @Override
     public void close() throws IOException {
+        hasher.update(path.getFileName().toString().getBytes(StandardCharsets.UTF_8));
+
         latch.countDown();
 
         if(in.available() == 0) hashBuilder.update(path, hasher.getValue());
diff --git a/src/main/java/net/szum123321/textile_backup/core/digest/HashingOutputStream.java b/src/main/java/net/szum123321/textile_backup/core/digest/HashingOutputStream.java
index 909d2ef..3b293ba 100644
--- a/src/main/java/net/szum123321/textile_backup/core/digest/HashingOutputStream.java
+++ b/src/main/java/net/szum123321/textile_backup/core/digest/HashingOutputStream.java
@@ -24,6 +24,7 @@ import org.jetbrains.annotations.NotNull;
 import java.io.FilterOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Path;
 
 public class HashingOutputStream extends FilterOutputStream {
@@ -39,21 +40,21 @@ public class HashingOutputStream extends FilterOutputStream {
 
     @Override
     public void write(int b) throws IOException {
-        hasher.update(b);
         out.write(b);
+        hasher.update((byte)b);
     }
 
     @Override
     public void write(byte @NotNull [] b, int off, int len) throws IOException {
-        hasher.update(b, off, len);
         out.write(b, off, len);
+        hasher.update(b, off, len);
     }
 
     @Override
     public void close() throws IOException {
+        hasher.update(path.getFileName().toString().getBytes(StandardCharsets.UTF_8));
         long h = hasher.getValue();
         hashBuilder.update(path, h);
         super.close();
-
     }
 }
diff --git a/src/main/java/net/szum123321/textile_backup/core/restore/RestoreBackupRunnable.java b/src/main/java/net/szum123321/textile_backup/core/restore/RestoreBackupRunnable.java
index 9fd39ed..cd1135b 100644
--- a/src/main/java/net/szum123321/textile_backup/core/restore/RestoreBackupRunnable.java
+++ b/src/main/java/net/szum123321/textile_backup/core/restore/RestoreBackupRunnable.java
@@ -34,6 +34,7 @@ import net.szum123321.textile_backup.core.restore.decompressors.ZipDecompressor;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.util.Optional;
 import java.util.concurrent.FutureTask;
 
 /**
@@ -63,7 +64,8 @@ public class RestoreBackupRunnable implements Runnable {
         try {
             tmp = Files.createTempDirectory(
                     ctx.server().getRunDirectory().toPath(),
-                    ctx.restoreableFile().getFile().getFileName().toString());
+                    ctx.restoreableFile().getFile().getFileName().toString()
+            );
         } catch (IOException e) {
             log.error("An exception occurred while unpacking backup", e);
             return;
@@ -72,7 +74,8 @@ public class RestoreBackupRunnable implements Runnable {
         //By making a separate thread we can start unpacking an old backup instantly
         //Let the server shut down gracefully, and wait for the old world backup to complete
         FutureTask<Void> waitForShutdown = new FutureTask<>(() -> {
-            ctx.server().getThread().join(); //wait for server to die and save all its state
+            ctx.server().getThread().join(); //wait for server thread to die and save all its state
+
             if(config.get().backupOldWorlds) {
                 return MakeBackupRunnableFactory.create (
                         BackupContext.Builder
@@ -84,6 +87,7 @@ public class RestoreBackupRunnable implements Runnable {
                         .build()
                 ).call();
             }
+
             return null;
         });
 
@@ -99,21 +103,28 @@ public class RestoreBackupRunnable implements Runnable {
             else hash = GenericTarDecompressor.decompress(ctx.restoreableFile().getFile(), tmp);
 
-            CompressionStatus status = CompressionStatus.readFromFile(tmp);
-            Files.delete(tmp.resolve(CompressionStatus.DATA_FILENAME));
-
             log.info("Waiting for server to fully terminate...");
 
             //locks until the backup is finished
             waitForShutdown.get();
 
-            log.info("Status: {}", status);
+            Optional<String> errorMsg;
 
-            var state = status.isValid(hash, ctx);
+            if(Files.notExists(CompressionStatus.resolveStatusFilename(tmp))) {
+                errorMsg = Optional.of("Status file not found!");
+            } else {
+                CompressionStatus status = CompressionStatus.readFromFile(tmp);
 
-            if(state.isEmpty() || !config.get().errorErrorHandlingMode.verify()) {
-                if (state.isEmpty()) log.info("Backup valid. Restoring");
-                else log.info("Backup is damaged, but verification is disabled [{}]. Restoring", state.get());
+                log.info("Status: {}", status);
+
+                Files.delete(tmp.resolve(CompressionStatus.DATA_FILENAME));
+
+                errorMsg = status.validate(hash, ctx);
+            }
+
+            if(errorMsg.isEmpty() || !config.get().integrityVerificationMode.verify()) {
+                if (errorMsg.isEmpty()) log.info("Backup valid. Restoring");
+                else log.info("Backup is damaged, but verification is disabled [{}]. Restoring", errorMsg.get());
 
                 Utilities.deleteDirectory(worldFile);
                 Files.move(tmp, worldFile);
@@ -123,7 +134,7 @@ public class RestoreBackupRunnable implements Runnable {
                 Files.delete(ctx.restoreableFile().getFile());
             }
         } else {
-            log.error(state.get());
+            log.error(errorMsg.get());
         }
     } catch (Exception e) {
         log.error("An exception occurred while trying to restore a backup!", e);
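
A few notes on the design follow, each with a short sketch.

Why the aggregator no longer hashes names: FileTreeHashBuilder folds every per-file hash into a single value with XOR, which is commutative, so the order in which ParallelZip's worker threads finish cannot affect the result. The flip side is that each per-file hash must be self-describing, which is why the file name now has to be mixed in by the streams themselves. A minimal sketch of the combine step, with java.util.zip.CRC32 standing in for the mod's Globals.CHECKSUM_SUPPLIER (whose implementation is not part of this patch):

    import java.nio.charset.StandardCharsets;
    import java.util.zip.CRC32;

    // Sketch: the order-independent combine used by FileTreeHashBuilder.
    // XOR is commutative, so worker threads may finish in any order.
    class TreeHashSketch {
        private final Object lock = new Object();
        private long hash = 0;

        // newHash must already cover the file's bytes AND its name,
        // which is exactly what this patch moves into the streams' close().
        void update(long newHash) {
            synchronized (lock) {
                hash ^= newHash;
            }
        }

        long getValue() { return hash; }

        // Hypothetical helper: what one worker would feed in, with
        // CRC32 standing in for the mod's checksum supplier.
        static long hashOf(String fileName, byte[] content) {
            CRC32 c = new CRC32();
            c.update(content);
            c.update(fileName.getBytes(StandardCharsets.UTF_8));
            return c.getValue();
        }
    }

Without the name in the per-file hash, two files with identical contents under different names would contribute identical values to the XOR fold, so moving the name hashing into the streams preserves the old guarantee.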
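
The patched HashingInputStream then reads roughly as below. The (byte) cast in the patch suggests the mod's checksum interface consumes single bytes; CRC32's update(int), which takes the low 8 bits, is used here instead, and the BrokenFileHandler error path is omitted:

    import java.io.FilterInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Path;
    import java.util.concurrent.CountDownLatch;
    import java.util.zip.CRC32;

    // Sketch of the patched HashingInputStream: hash every byte read,
    // mix the file name into the hash on close(), then release the
    // latch so the aggregator knows this file is finished.
    class HashingInputSketch extends FilterInputStream {
        private final CRC32 hasher = new CRC32(); // stand-in checksum
        private final Path path;
        private final CountDownLatch latch;       // one count per file

        HashingInputSketch(InputStream in, Path path, CountDownLatch latch) {
            super(in);
            this.path = path;
            this.latch = latch;
        }

        @Override
        public int read() throws IOException {
            int i = in.read();
            if (i != -1) hasher.update(i); // hash the byte just read
            return i;
        }

        @Override
        public int read(byte[] b, int off, int len) throws IOException {
            int n = in.read(b, off, len);
            if (n != -1) hasher.update(b, off, n);
            return n;
        }

        @Override
        public void close() throws IOException {
            // The name now goes into the per-file hash, not the aggregator:
            hasher.update(path.getFileName().toString().getBytes(StandardCharsets.UTF_8));
            latch.countDown(); // this file is done
            super.close();
        }

        long getValue() { return hasher.getValue(); }
    }

On the aggregator side, a latch.await() before reading the combined value is what the class comment means by making sure all the files have been read before requesting the final value.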
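
In RestoreBackupRunnable, the FutureTask exists to overlap two slow operations: the server shutting down (plus the optional old-world backup) and the new backup being unpacked. A stripped-down sketch of that pattern, with invented thread and parameter names:

    import java.util.concurrent.FutureTask;

    // Sketch of the shutdown overlap in RestoreBackupRunnable: start
    // unpacking immediately, and only block on the server thread's
    // death right before swapping the world directory in.
    class ShutdownOverlapSketch {
        static void restore(Thread serverThread, Runnable unpack) throws Exception {
            FutureTask<Void> waitForShutdown = new FutureTask<>(() -> {
                serverThread.join(); // server saves its state and dies
                // (the old-world backup from the patch would run here)
                return null;
            });
            new Thread(waitForShutdown, "Restore wait thread").start();

            unpack.run(); // decompress the backup in parallel with shutdown

            waitForShutdown.get(); // locks until both are done
            // ...only now is it safe to replace the world directory
        }
    }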
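
Finally, the reworked validation changes behavior in a small but useful way: the status file is only read after waitForShutdown.get(), and a missing file now yields a readable error message instead of an exception out of readFromFile. Every outcome funnels into one Optional<String>, where empty means the backup is valid. Condensed to its decision logic, under assumed names for the pieces the patch doesn't show:

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Optional;
    import java.util.function.Supplier;

    // Sketch of the patched decision logic: empty Optional = backup valid.
    class RestoreDecisionSketch {
        // 'verify' stands in for config.get().integrityVerificationMode.verify();
        // 'validate' stands in for status.validate(hash, ctx).
        static boolean shouldRestore(Path statusFile,
                                     Supplier<Optional<String>> validate,
                                     boolean verify) {
            Optional<String> errorMsg = Files.notExists(statusFile)
                    ? Optional.of("Status file not found!")
                    : validate.get();

            // Restore when valid, or when integrity verification is off.
            return errorMsg.isEmpty() || !verify;
        }
    }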