improved comments and formatting

parent 8385044154
commit 472aeda184
@@ -51,6 +51,8 @@ public class Globals {
     private AwaitThread restoreAwaitThread = null;
     private Path lockedPath = null;

+    private String combinedVersionString;
+
     private Globals() {}

     public ExecutorService getQueueExecutor() { return executorService; }
@@ -107,4 +109,12 @@ public class Globals {

         if(disableTMPFiles) log.error("Might cause: https://github.com/Szum123321/textile_backup/wiki/ZIP-Problems");
     }
+
+    public String getCombinedVersionString() {
+        return combinedVersionString;
+    }
+
+    public void setCombinedVersionString(String combinedVersionString) {
+        this.combinedVersionString = combinedVersionString;
+    }
 }
@@ -25,10 +25,9 @@ import java.io.InputStream;
 import java.nio.file.Path;
 import java.util.Optional;

-
 public interface InputSupplier extends InputStreamSupplier {
     InputStream getInputStream() throws IOException;
-    //If an entry is virtual (a.k.a there is no actual file to open, only input stream)
+    //If an entry is virtual (a.k.a. there is no actual file to open, only input stream)
     Optional<Path> getPath();
     String getName();

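The comment on getPath() above is the whole contract for virtual entries, so a self-contained illustration may help. The stand-in interface below mirrors only the methods visible in this hunk (the real InputSupplier also extends commons-compress's InputStreamSupplier), and the entry name and payload are made up for the example:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.Optional;

public class VirtualEntryExample {
    //Stand-in mirroring only the methods shown in this hunk.
    interface EntrySupplier {
        InputStream getInputStream() throws IOException;
        Optional<Path> getPath(); //empty => virtual entry
        String getName();
    }

    public static void main(String[] args) throws IOException {
        //A "virtual" entry: the bytes exist only in memory, never on disk.
        EntrySupplier virtual = new EntrySupplier() {
            final byte[] data = "made-up metadata".getBytes(StandardCharsets.UTF_8);
            public InputStream getInputStream() { return new ByteArrayInputStream(data); }
            public Optional<Path> getPath() { return Optional.empty(); }
            public String getName() { return "example-entry.txt"; }
        };

        //An archiver branches on getPath(): empty means it must build the archive
        //entry from getName() plus the stream, the case handled later in
        //AbstractTarArchiver.
        try (InputStream in = virtual.getInputStream()) {
            System.out.println(virtual.getName() + ", virtual=" + virtual.getPath().isEmpty()
                    + ", bytes=" + in.readAllBytes().length);
        }
    }
}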
@@ -40,7 +40,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Stream;

 /**
- * Basic abstract class representing directory compressor
+ * Basic abstract class representing directory compressor with all the bells and whistles
 */
 public abstract class AbstractCompressor {
     private final static TextileLogger log = new TextileLogger(TextileBackup.MOD_NAME);
@@ -49,14 +49,14 @@ public abstract class AbstractCompressor {
         Instant start = Instant.now();

         FileTreeHashBuilder fileHashBuilder = new FileTreeHashBuilder();
-        BrokenFileHandler brokenFileHandler = new BrokenFileHandler();
+        BrokenFileHandler brokenFileHandler = new BrokenFileHandler(); //Basically a hashmap storing files and their respective exceptions

         try (OutputStream outStream = Files.newOutputStream(outputFile);
              BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(outStream);
              OutputStream arc = createArchiveOutputStream(bufferedOutputStream, ctx, coreLimit);
              Stream<Path> fileStream = Files.walk(inputFile)) {

-            AtomicInteger fileCounter = new AtomicInteger(0);
+            AtomicInteger fileCounter = new AtomicInteger(0); //number of files to compress

             var it = fileStream
                     .filter(path -> !Utilities.isBlacklisted(inputFile.relativize(path)))
@@ -66,6 +66,7 @@ public abstract class AbstractCompressor {

             log.info("File count: {}", fileCounter.get());

+            //will be used in conjunction with ParallelZip to avoid race condition
             CountDownLatch latch = new CountDownLatch(fileCounter.get());

             while(it.hasNext()) {
@@ -83,6 +84,7 @@ public abstract class AbstractCompressor {
                     );
                 } catch (IOException e) {
                     brokenFileHandler.handle(file, e);
+                    //In Permissive mode we allow partial backups
                     if(ConfigHelper.INSTANCE.get().errorErrorHandlingMode.isStrict()) throw e;
                     else log.sendErrorAL(ctx, "An exception occurred while trying to compress: {}",
                             inputFile.relativize(file).toString(), e
@@ -90,6 +92,7 @@ public abstract class AbstractCompressor {
                 }
             }

+            //wait for all the InputStreams to close/fail with InputSupplier
             latch.await();

             Instant now = Instant.now();
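The two comments added in the hunks above describe one pattern: the latch is sized to the file count, every entry counts down when its stream closes or fails, and the compressor blocks on await() before finalizing the archive. A minimal, self-contained sketch of that pattern, with illustrative names only:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class LatchPatternExample {
    public static void main(String[] args) throws InterruptedException {
        int fileCount = 4; //stands in for fileCounter.get()
        CountDownLatch latch = new CountDownLatch(fileCount);
        ExecutorService pool = Executors.newFixedThreadPool(2);

        for (int i = 0; i < fileCount; i++) {
            pool.submit(() -> {
                try {
                    Thread.sleep(50); //pretend to read and compress one file's stream
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } finally {
                    latch.countDown(); //success and failure both count down
                }
            });
        }

        latch.await(); //like the compressor: no finalizing until every stream is done
        System.out.println("all " + fileCount + " entries processed");
        pool.shutdown();
    }
}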
@@ -45,7 +45,7 @@ public class AbstractTarArchiver extends AbstractCompressor {
     protected void addEntry(InputSupplier input, OutputStream arc) throws IOException {
         try (InputStream fileInputStream = input.getInputStream()) {
             TarArchiveEntry entry;
-            if(input.getPath().isEmpty()) {//Virtual entry
+            if(input.getPath().isEmpty()) { //Virtual entry
                 entry = new TarArchiveEntry(input.getName());
                 entry.setSize(input.size());
             } else
@@ -22,16 +22,15 @@ import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
 import java.util.Arrays;

-/*
-This algorithm copies construction of SeaHash (https://ticki.github.io/blog/seahash-explained/) including its IV
-What it differs in is that it uses Xoroshift64* instead of PCG. Although it might lower the output quality,
-I don't think it matters that much, honestly. One advantage the xoroshift has is that it should be
-easier to implement with AVX. Java should soon ship its vector api by default.
+/**
+ * This algorithm copies construction of <a href="https://ticki.github.io/blog/seahash-explained/">SeaHash</a> including its IV.
+ * What it differs in is that it uses Xoroshift64* instead of PCG as its pseudo-random function. Although it might lower
+ * the output quality, I don't think it matters that much, honestly. One advantage of xoroshift is that it should be
+ * easier to implement with AVX. Java should soon ship its vector api by default.
 */
 public class BalticHash implements Hash {
-    protected final static long[] IV = { 0x16f11fe89b0d677cL, 0xb480a793d8e6c86cL, 0x6fe2e5aaf078ebc9L, 0x14f994a4c5259381L};

     //SeaHash IV
+    protected final static long[] IV = { 0x16f11fe89b0d677cL, 0xb480a793d8e6c86cL, 0x6fe2e5aaf078ebc9L, 0x14f994a4c5259381L };
     private final long[] state = Arrays.copyOf(IV, IV.length);
     protected final int buffer_limit = state.length * Long.BYTES;
     protected final byte[] _byte_buffer = new byte[(state.length + 1) * Long.BYTES];
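For readers unfamiliar with the construction named in the new javadoc, here is a compact, illustrative sketch of a SeaHash-style fold that uses an xorshift64*-style diffusion step in place of PCG. It reuses the IV from the hunk above but is otherwise an assumption, not the mod's actual BalticHash code:

public class XorSeaSketch {
    //The SeaHash IV from the diff above.
    private static final long[] IV = {
        0x16f11fe89b0d677cL, 0xb480a793d8e6c86cL,
        0x6fe2e5aaf078ebc9L, 0x14f994a4c5259381L
    };

    //xorshift64* step: cheap diffusion, arguably SIMD-friendlier than PCG.
    static long diffuse(long x) {
        x ^= x >>> 12;
        x ^= x << 25;
        x ^= x >>> 27;
        return x * 0x2545F4914F6CDD1DL;
    }

    public static long hash(long[] words) {
        long[] state = IV.clone();
        for (int i = 0; i < words.length; i++) {
            int lane = i & 3; //rotate through the four lanes
            state[lane] = diffuse(state[lane] ^ words[i]);
        }
        //final fold of the four lanes into one value
        return diffuse(state[0] ^ state[1] ^ state[2] ^ state[3] ^ words.length);
    }

    public static void main(String[] args) {
        System.out.printf("%016x%n", hash(new long[]{1L, 2L, 3L}));
    }
}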
@@ -22,16 +22,16 @@ package net.szum123321.textile_backup.core.digest;

 import net.szum123321.textile_backup.core.digest.BalticHash;

-/*Mostly working XorSeaHash impl using SIMD. Should speed up calculation on most systems currently in use
+/**
+ * Mostly working XorSeaHash impl using SIMD. Should speed up calculation on most systems currently in use

-It's actually slower. I tested it by comparing runtimes while hashing a directly opened FileInputStream.
-My cpu is AMD Ryzen 5 3500U
+ <br>...<br>

-There are two reasons I can think of: either vector construction simply takes so much time or jvm auto-vectorizes better than me
-It's still probably far from being the slowest part of code, so I don't expect any major slowdowns
-I will keep this code here for future work perhaps
+ * It's actually slower. I tested it by comparing runtimes while hashing a directly opened FileInputStream.
+ * My cpu is AMD Ryzen 5 3500U
+ * There are two reasons I can think of: either vector construction simply takes so much time or jvm auto-vectorizes better than I.
+ * It's still probably far from being the slowest part of code, so I don't expect any major slowdowns
+ * I will keep this code here for future work perhaps
 */
 public class BalticHashSIMD extends BalticHash {/*
     public BalticHashSIMD() { throw new UnsupportedOperationException(); } //For safety
@@ -29,6 +29,10 @@ import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.concurrent.atomic.AtomicBoolean;

+/**
+ * What this class does is it collects the hashed files and combines them into a single number,
+ * thus we can verify file tree integrity
+ */
 public class FileTreeHashBuilder {
     private final static TextileLogger log = new TextileLogger(TextileBackup.MOD_NAME);
     private final Object lock = new Object();
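The new javadoc says the class folds per-file hashes into a single number under a lock. A rough sketch of one way such a fold can work; the XOR combination and the file-count mix-in are illustrative guesses, not the mod's actual implementation:

public class TreeHashSketch {
    private final Object lock = new Object();
    private long combined = 0;
    private int files = 0;

    //Called once per hashed file; XOR makes the combined value independent of
    //arrival order, which matters when files are hashed on multiple threads.
    public void update(long fileHash) {
        synchronized (lock) {
            combined ^= fileHash;
            files++;
        }
    }

    public long getValue() {
        synchronized (lock) { return combined ^ files; }
    }

    public static void main(String[] args) {
        TreeHashSketch builder = new TreeHashSketch();
        builder.update(0xDEADBEEFL);
        builder.update(0xCAFEBABEL);
        System.out.printf("%016x%n", builder.getValue());
    }
}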
@@ -21,13 +21,12 @@ package net.szum123321.textile_backup.core.digest;
 public interface Hash {
     void update(byte b);

-    default void update(int b) {
-        update((byte)b);
-    }
+    default void update(int b) { update((byte)b); }
+
     void update(long b);
-    default void update(byte[] b) {
-        update(b, 0, b.length);
-    }
+
+    default void update(byte[] b) { update(b, 0, b.length); }
+
     void update(byte[] b, int off, int len);

     long getValue();
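To show the behavior of the defaults compacted in this hunk, here is a self-contained copy of the interface with a throwaway implementation; the bit-shift "hash" is a placeholder, only the delegation of the default methods is the point:

public class HashDefaultsExample {
    interface Hash {
        void update(byte b);
        default void update(int b) { update((byte) b); }
        void update(long b);
        default void update(byte[] b) { update(b, 0, b.length); }
        void update(byte[] b, int off, int len);
        long getValue();
    }

    public static void main(String[] args) {
        Hash h = new Hash() {
            private long v;
            public void update(byte b) { v = (v << 1) ^ (b & 0xFF); }
            public void update(long b) { for (int i = 0; i < 8; i++) update((byte) (b >>> (8 * i))); }
            public void update(byte[] b, int off, int len) {
                for (int i = off; i < off + len; i++) update(b[i]);
            }
            public long getValue() { return v; }
        };

        h.update(42);                  //int overload delegates to update(byte)
        h.update(new byte[]{1, 2, 3}); //array overload delegates to the ranged version
        System.out.printf("%x%n", h.getValue());
    }
}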
@@ -27,8 +27,14 @@ import java.io.*;
 import java.nio.file.Path;
 import java.util.concurrent.CountDownLatch;

-//This class calculates a hash of the file on the input stream, submits it to FileTreeHashBuilder.
-//In case the underlying stream hasn't been read completely in, puts it into BrokeFileHandler
+/**
+ * This class calculates a hash of the file on the input stream, submits it to FileTreeHashBuilder.
+ * In case the underlying stream hasn't been read completely in, puts it into BrokeFileHandler
+
+ * Futhermore, ParallelZip works by putting al the file requests into a queue and then compressing them
+ * with multiple threads. Thus, we have to make sure that all the files have been read before requesting the final value
+ * That is what CountDownLatch does
+ */
 public class HashingInputStream extends FilterInputStream {
     private final Path path;
     private final Hash hasher = Globals.CHECKSUM_SUPPLIER.get();
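A simplified sketch of the pattern the new javadoc describes: a FilterInputStream that hashes bytes as they flow through and counts down the shared latch on close. CRC32 stands in for the mod's Hash implementation, and the broken-file reporting is omitted:

import java.io.ByteArrayInputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.CountDownLatch;
import java.util.zip.CRC32;

class SketchHashingStream extends FilterInputStream {
    private final CRC32 hasher = new CRC32();
    private final CountDownLatch latch;

    SketchHashingStream(InputStream in, CountDownLatch latch) {
        super(in);
        this.latch = latch;
    }

    @Override public int read() throws IOException {
        int b = in.read();
        if (b != -1) hasher.update(b); //hash every byte that passes through
        return b;
    }

    @Override public int read(byte[] buf, int off, int len) throws IOException {
        int n = in.read(buf, off, len);
        if (n > 0) hasher.update(buf, off, n);
        return n;
    }

    @Override public void close() throws IOException {
        super.close();
        latch.countDown(); //signal the compressor that this entry is finished
    }

    long value() { return hasher.getValue(); }

    public static void main(String[] args) throws IOException, InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        try (SketchHashingStream s = new SketchHashingStream(
                new ByteArrayInputStream("hello".getBytes()), latch)) {
            while (s.read() != -1) { /* drain the stream */ }
            System.out.printf("crc=%08x%n", s.value());
        }
        latch.await(); //returns immediately: close() already counted down
    }
}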
@@ -36,7 +42,6 @@ public class HashingInputStream extends FilterInputStream {
     private final BrokenFileHandler brokenFileHandler;
     private final CountDownLatch latch;

-
     public HashingInputStream(InputStream in, Path path, FileTreeHashBuilder hashBuilder, BrokenFileHandler brokenFileHandler, CountDownLatch latch) {
         super(in);
         this.path = path;
@@ -37,6 +37,9 @@ import java.nio.file.Path;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.FutureTask;

+/**
+ * This class restores a file provided by RestoreContext.
+ */
 public class RestoreBackupRunnable implements Runnable {
     private final static TextileLogger log = new TextileLogger(TextileBackup.MOD_NAME);
     private final static ConfigHelper config = ConfigHelper.INSTANCE;