diff --git a/src/main/java/net/szum123321/textile_backup/Globals.java b/src/main/java/net/szum123321/textile_backup/Globals.java index 37d620a..62f04d3 100644 --- a/src/main/java/net/szum123321/textile_backup/Globals.java +++ b/src/main/java/net/szum123321/textile_backup/Globals.java @@ -51,6 +51,8 @@ public class Globals { private AwaitThread restoreAwaitThread = null; private Path lockedPath = null; + private String combinedVersionString; + private Globals() {} public ExecutorService getQueueExecutor() { return executorService; } @@ -107,4 +109,12 @@ public class Globals { if(disableTMPFiles) log.error("Might cause: https://github.com/Szum123321/textile_backup/wiki/ZIP-Problems"); } + + public String getCombinedVersionString() { + return combinedVersionString; + } + + public void setCombinedVersionString(String combinedVersionString) { + this.combinedVersionString = combinedVersionString; + } } diff --git a/src/main/java/net/szum123321/textile_backup/core/create/InputSupplier.java b/src/main/java/net/szum123321/textile_backup/core/create/InputSupplier.java index 6a5ec6a..76a40e7 100644 --- a/src/main/java/net/szum123321/textile_backup/core/create/InputSupplier.java +++ b/src/main/java/net/szum123321/textile_backup/core/create/InputSupplier.java @@ -25,10 +25,9 @@ import java.io.InputStream; import java.nio.file.Path; import java.util.Optional; - public interface InputSupplier extends InputStreamSupplier { InputStream getInputStream() throws IOException; - //If an entry is virtual (a.k.a there is no actual file to open, only input stream) + //If an entry is virtual (a.k.a. 
there is no actual file to open, only input stream) Optional getPath(); String getName(); diff --git a/src/main/java/net/szum123321/textile_backup/core/create/compressors/AbstractCompressor.java b/src/main/java/net/szum123321/textile_backup/core/create/compressors/AbstractCompressor.java index a0cf75e..9866626 100644 --- a/src/main/java/net/szum123321/textile_backup/core/create/compressors/AbstractCompressor.java +++ b/src/main/java/net/szum123321/textile_backup/core/create/compressors/AbstractCompressor.java @@ -40,7 +40,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Stream; /** - * Basic abstract class representing directory compressor + * Basic abstract class representing directory compressor with all the bells and whistles */ public abstract class AbstractCompressor { private final static TextileLogger log = new TextileLogger(TextileBackup.MOD_NAME); @@ -49,14 +49,14 @@ public abstract class AbstractCompressor { Instant start = Instant.now(); FileTreeHashBuilder fileHashBuilder = new FileTreeHashBuilder(); - BrokenFileHandler brokenFileHandler = new BrokenFileHandler(); + BrokenFileHandler brokenFileHandler = new BrokenFileHandler(); //Basically a hashmap storing files and their respective exceptions try (OutputStream outStream = Files.newOutputStream(outputFile); BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(outStream); OutputStream arc = createArchiveOutputStream(bufferedOutputStream, ctx, coreLimit); Stream fileStream = Files.walk(inputFile)) { - AtomicInteger fileCounter = new AtomicInteger(0); + AtomicInteger fileCounter = new AtomicInteger(0); //number of files to compress var it = fileStream .filter(path -> !Utilities.isBlacklisted(inputFile.relativize(path))) @@ -66,6 +66,7 @@ public abstract class AbstractCompressor { log.info("File count: {}", fileCounter.get()); + //will be used in conjunction with ParallelZip to avoid race condition CountDownLatch latch = new CountDownLatch(fileCounter.get()); 
while(it.hasNext()) { @@ -83,6 +84,7 @@ public abstract class AbstractCompressor { ); } catch (IOException e) { brokenFileHandler.handle(file, e); + //In Permissive mode we allow partial backups if(ConfigHelper.INSTANCE.get().errorErrorHandlingMode.isStrict()) throw e; else log.sendErrorAL(ctx, "An exception occurred while trying to compress: {}", inputFile.relativize(file).toString(), e @@ -90,6 +92,7 @@ public abstract class AbstractCompressor { } } + //wait for all the InputStreams to close/fail with InputSupplier latch.await(); Instant now = Instant.now(); diff --git a/src/main/java/net/szum123321/textile_backup/core/create/compressors/tar/AbstractTarArchiver.java b/src/main/java/net/szum123321/textile_backup/core/create/compressors/tar/AbstractTarArchiver.java index 94d0ad2..4ae84a3 100644 --- a/src/main/java/net/szum123321/textile_backup/core/create/compressors/tar/AbstractTarArchiver.java +++ b/src/main/java/net/szum123321/textile_backup/core/create/compressors/tar/AbstractTarArchiver.java @@ -45,7 +45,7 @@ public class AbstractTarArchiver extends AbstractCompressor { protected void addEntry(InputSupplier input, OutputStream arc) throws IOException { try (InputStream fileInputStream = input.getInputStream()) { TarArchiveEntry entry; - if(input.getPath().isEmpty()) {//Virtual entry + if(input.getPath().isEmpty()) { //Virtual entry entry = new TarArchiveEntry(input.getName()); entry.setSize(input.size()); } else diff --git a/src/main/java/net/szum123321/textile_backup/core/digest/BalticHash.java b/src/main/java/net/szum123321/textile_backup/core/digest/BalticHash.java index 3ad5280..b1892b9 100644 --- a/src/main/java/net/szum123321/textile_backup/core/digest/BalticHash.java +++ b/src/main/java/net/szum123321/textile_backup/core/digest/BalticHash.java @@ -22,16 +22,15 @@ import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.util.Arrays; -/* - This algorithm copies construction of SeaHash (https://ticki.github.io/blog/seahash-explained/) including 
its IV - What it differs in is that it uses Xoroshift64* instead of PCG. Although it might lower the output quality, - I don't think it matters that much, honestly. One advantage the xoroshift has is that it should be - easier to implement with AVX. Java should soon ship its vector api by default. +/** + * This algorithm copies construction of SeaHash including its IV. + * What it differs in is that it uses Xoroshift64* instead of PCG as its pseudo-random function. Although it might lower + * the output quality, I don't think it matters that much, honestly. One advantage of xoroshift is that it should be + * easier to implement with AVX. Java should soon ship its vector api by default. */ public class BalticHash implements Hash { - protected final static long[] IV = { 0x16f11fe89b0d677cL, 0xb480a793d8e6c86cL, 0x6fe2e5aaf078ebc9L, 0x14f994a4c5259381L}; - //SeaHash IV + protected final static long[] IV = { 0x16f11fe89b0d677cL, 0xb480a793d8e6c86cL, 0x6fe2e5aaf078ebc9L, 0x14f994a4c5259381L }; private final long[] state = Arrays.copyOf(IV, IV.length); protected final int buffer_limit = state.length * Long.BYTES; protected final byte[] _byte_buffer = new byte[(state.length + 1) * Long.BYTES]; diff --git a/src/main/java/net/szum123321/textile_backup/core/digest/BalticHashSIMD.java b/src/main/java/net/szum123321/textile_backup/core/digest/BalticHashSIMD.java index ed633a2..2e15c56 100644 --- a/src/main/java/net/szum123321/textile_backup/core/digest/BalticHashSIMD.java +++ b/src/main/java/net/szum123321/textile_backup/core/digest/BalticHashSIMD.java @@ -22,16 +22,16 @@ package net.szum123321.textile_backup.core.digest; import net.szum123321.textile_backup.core.digest.BalticHash; -/*Mostly working XorSeaHash impl using SIMD. Should speed up calculation on most systems currently in use +/** + * Mostly working XorSeaHash impl using SIMD. Should speed up calculation on most systems currently in use -It's actually slower. 
I tested it by comparing runtimes while hashing a directly opened FileInputStream. -My cpu is AMD Ryzen 5 3500U +
...
-There are two reasons I can think of: either vector construction simply takes so much time or jvm auto-vectorizes better than me - -It's still probably far from being the slowest part of code, so I don't expect any major slowdowns - -I will keep this code here for future work perhaps + * It's actually slower. I tested it by comparing runtimes while hashing a directly opened FileInputStream. + * My cpu is AMD Ryzen 5 3500U + * There are two reasons I can think of: either vector construction simply takes so much time or jvm auto-vectorizes better than I. + * It's still probably far from being the slowest part of code, so I don't expect any major slowdowns + * I will keep this code here for future work perhaps */ public class BalticHashSIMD extends BalticHash {/* public BalticHashSIMD() { throw new UnsupportedOperationException(); } //For safety diff --git a/src/main/java/net/szum123321/textile_backup/core/digest/FileTreeHashBuilder.java b/src/main/java/net/szum123321/textile_backup/core/digest/FileTreeHashBuilder.java index 5722d7b..8c1e136 100644 --- a/src/main/java/net/szum123321/textile_backup/core/digest/FileTreeHashBuilder.java +++ b/src/main/java/net/szum123321/textile_backup/core/digest/FileTreeHashBuilder.java @@ -29,6 +29,10 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.concurrent.atomic.AtomicBoolean; +/** + * What this class does is it collects the hashed files and combines them into a single number, + * thus we can verify file tree integrity + */ public class FileTreeHashBuilder { private final static TextileLogger log = new TextileLogger(TextileBackup.MOD_NAME); private final Object lock = new Object(); diff --git a/src/main/java/net/szum123321/textile_backup/core/digest/Hash.java b/src/main/java/net/szum123321/textile_backup/core/digest/Hash.java index d05533a..aa54578 100644 --- a/src/main/java/net/szum123321/textile_backup/core/digest/Hash.java +++ b/src/main/java/net/szum123321/textile_backup/core/digest/Hash.java @@ 
-21,13 +21,12 @@ package net.szum123321.textile_backup.core.digest; public interface Hash { void update(byte b); - default void update(int b) { - update((byte)b); - } + default void update(int b) { update((byte)b); } + void update(long b); - default void update(byte[] b) { - update(b, 0, b.length); - } + + default void update(byte[] b) { update(b, 0, b.length); } + void update(byte[] b, int off, int len); long getValue(); diff --git a/src/main/java/net/szum123321/textile_backup/core/digest/HashingInputStream.java b/src/main/java/net/szum123321/textile_backup/core/digest/HashingInputStream.java index 501935f..2f51eb0 100644 --- a/src/main/java/net/szum123321/textile_backup/core/digest/HashingInputStream.java +++ b/src/main/java/net/szum123321/textile_backup/core/digest/HashingInputStream.java @@ -27,8 +27,14 @@ import java.io.*; import java.nio.file.Path; import java.util.concurrent.CountDownLatch; -//This class calculates a hash of the file on the input stream, submits it to FileTreeHashBuilder. -//In case the underlying stream hasn't been read completely in, puts it into BrokeFileHandler +/** + * This class calculates a hash of the file on the input stream, submits it to FileTreeHashBuilder. + * In case the underlying stream hasn't been read completely in, puts it into BrokenFileHandler + + * Furthermore, ParallelZip works by putting all the file requests into a queue and then compressing them + * with multiple threads. 
Thus, we have to make sure that all the files have been read before requesting the final value + * That is what CountDownLatch does + */ public class HashingInputStream extends FilterInputStream { private final Path path; private final Hash hasher = Globals.CHECKSUM_SUPPLIER.get(); @@ -36,7 +42,6 @@ public class HashingInputStream extends FilterInputStream { private final BrokenFileHandler brokenFileHandler; private final CountDownLatch latch; - public HashingInputStream(InputStream in, Path path, FileTreeHashBuilder hashBuilder, BrokenFileHandler brokenFileHandler, CountDownLatch latch) { super(in); this.path = path; diff --git a/src/main/java/net/szum123321/textile_backup/core/restore/RestoreBackupRunnable.java b/src/main/java/net/szum123321/textile_backup/core/restore/RestoreBackupRunnable.java index 806a4f5..0eff593 100644 --- a/src/main/java/net/szum123321/textile_backup/core/restore/RestoreBackupRunnable.java +++ b/src/main/java/net/szum123321/textile_backup/core/restore/RestoreBackupRunnable.java @@ -37,6 +37,9 @@ import java.nio.file.Path; import java.util.concurrent.ExecutionException; import java.util.concurrent.FutureTask; +/** + * This class restores a file provided by RestoreContext. + */ public class RestoreBackupRunnable implements Runnable { private final static TextileLogger log = new TextileLogger(TextileBackup.MOD_NAME); private final static ConfigHelper config = ConfigHelper.INSTANCE;