Add support for writing through StreamInterface.
This adds methods and tests for writing to a PDB stream. With this, even a discontiguous PDB stream can be treated as a sequential stream of bytes for the purposes of writing.

Reviewed By: ruiu
Differential Revision: http://reviews.llvm.org/D21157
llvm-svn: 272369
This commit is contained in:
parent d0d202e5ad
commit 8110e5a8a3
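For context, here is a minimal usage sketch of the new writing path (based on the APIs and unit tests in this commit, not part of the diff itself; the roundTrip helper, buffer size, and values are hypothetical): a writable ByteStream wraps a caller-owned buffer, a StreamWriter serializes values into it at increasing offsets, and a StreamReader reads them back.

#include "llvm/DebugInfo/CodeView/ByteStream.h"
#include "llvm/DebugInfo/CodeView/StreamReader.h"
#include "llvm/DebugInfo/CodeView/StreamWriter.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <vector>

using namespace llvm;
using namespace llvm::codeview;

// Hypothetical helper: write a u32 and a null-terminated string, then read
// them back through the same stream. Error handling is simplified to
// consumeError for brevity.
static void roundTrip() {
  std::vector<uint8_t> Bytes(16);  // caller-owned backing storage
  ByteStream<true> Stream(Bytes);  // writable stream over the buffer
  StreamWriter Writer(Stream);
  StreamReader Reader(Stream);

  consumeError(Writer.writeInteger(uint32_t(0xDEADBEEF))); // little-endian u32 at offset 0
  consumeError(Writer.writeZeroString("PDB"));             // string plus '\0' right after it

  uint32_t Value = 0;
  StringRef Str;
  consumeError(Reader.readInteger(Value));   // Value == 0xDEADBEEF
  consumeError(Reader.readZeroString(Str));  // Str == "PDB"
}

The same StreamWriter also works over a MappedBlockStream, which is what lets a discontiguous PDB stream be written as if it were sequential.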
@@ -16,19 +16,27 @@
#include "llvm/Support/Error.h"
#include <cstdint>
#include <memory>
#include <type_traits>

namespace llvm {
namespace codeview {
class StreamReader;

class ByteStream : public StreamInterface {
template <bool Writable = false> class ByteStream : public StreamInterface {
typedef typename std::conditional<Writable, MutableArrayRef<uint8_t>,
ArrayRef<uint8_t>>::type ArrayType;

public:
ByteStream();
explicit ByteStream(ArrayRef<uint8_t> Data);
~ByteStream() override;
ByteStream() {}
explicit ByteStream(ArrayType Data) : Data(Data) {}
~ByteStream() override {}

Error readBytes(uint32_t Offset, uint32_t Size,
ArrayRef<uint8_t> &Buffer) const override;
Error readLongestContiguousChunk(uint32_t Offset,
ArrayRef<uint8_t> &Buffer) const override;

Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Buffer) const override;

uint32_t getLength() const override;

@@ -36,9 +44,12 @@ public:
StringRef str() const;

private:
ArrayRef<uint8_t> Data;
ArrayType Data;
};

extern template class ByteStream<true>;
extern template class ByteStream<false>;

} // end namespace pdb
} // end namespace llvm

@@ -19,6 +19,7 @@ namespace codeview {
enum class cv_error_code {
unspecified = 1,
insufficient_buffer,
operation_unsupported,
corrupt_record,
};

@@ -97,6 +97,8 @@ public:

const Extractor &getExtractor() const { return E; }

StreamRef getUnderlyingStream() const { return Stream; }

private:
StreamRef Stream;
Extractor E;
@@ -28,9 +28,22 @@ class StreamInterface {
public:
virtual ~StreamInterface() {}

// Given an offset into the stream and a number of bytes, attempt to read
// the bytes and set the output ArrayRef to point to a reference into the
// stream, without copying any data.
virtual Error readBytes(uint32_t Offset, uint32_t Size,
ArrayRef<uint8_t> &Buffer) const = 0;

// Given an offset into the stream, read as much as possible without copying
// any data.
virtual Error readLongestContiguousChunk(uint32_t Offset,
ArrayRef<uint8_t> &Buffer) const = 0;

// Attempt to write the given bytes into the stream at the desired offset.
// This will always necessitate a copy. Cannot shrink or grow the stream,
// only writes into existing allocated space.
virtual Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Data) const = 0;

virtual uint32_t getLength() const = 0;
};

@@ -28,6 +28,7 @@ class StreamReader {
public:
StreamReader(StreamRef Stream);

Error readLongestContiguousChunk(ArrayRef<uint8_t> &Buffer);
Error readBytes(ArrayRef<uint8_t> &Buffer, uint32_t Size);
Error readInteger(uint16_t &Dest);
Error readInteger(uint32_t &Dest);
@@ -24,7 +24,8 @@ public:
StreamRef(const StreamInterface &Stream, uint32_t Offset, uint32_t Length)
: Stream(&Stream), ViewOffset(Offset), Length(Length) {}

StreamRef(const StreamRef &Stream, uint32_t Offset, uint32_t Length) = delete;
// Use StreamRef.slice() instead.
StreamRef(const StreamRef &S, uint32_t Offset, uint32_t Length) = delete;

Error readBytes(uint32_t Offset, uint32_t Size,
ArrayRef<uint8_t> &Buffer) const override {
@@ -33,7 +34,32 @@ public:
return Stream->readBytes(ViewOffset + Offset, Size, Buffer);
}

// Given an offset into the stream, read as much as possible without copying
// any data.
Error readLongestContiguousChunk(uint32_t Offset,
ArrayRef<uint8_t> &Buffer) const override {
if (Offset >= Length)
return make_error<CodeViewError>(cv_error_code::insufficient_buffer);

if (auto EC = Stream->readLongestContiguousChunk(Offset, Buffer))
return EC;
// This StreamRef might refer to a smaller window over a larger stream. In
// that case we will have read out more bytes than we should return, because
// we should not read past the end of the current view.
uint32_t MaxLength = Length - Offset;
if (Buffer.size() > MaxLength)
Buffer = Buffer.slice(0, MaxLength);
return Error::success();
}

Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Data) const override {
if (Data.size() + Offset > Length)
return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
return Stream->writeBytes(ViewOffset + Offset, Data);
}

uint32_t getLength() const override { return Length; }

StreamRef drop_front(uint32_t N) const {
if (!Stream)
return StreamRef();
include/llvm/DebugInfo/CodeView/StreamWriter.h (new file, 82 lines)
@@ -0,0 +1,82 @@
//===- StreamWriter.h - Writes bytes and objects to a stream ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_STREAMWRITER_H
#define LLVM_DEBUGINFO_CODEVIEW_STREAMWRITER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/CodeView/CodeViewError.h"
#include "llvm/DebugInfo/CodeView/StreamArray.h"
#include "llvm/DebugInfo/CodeView/StreamInterface.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"

#include <string>

namespace llvm {
namespace codeview {

class StreamRef;

class StreamWriter {
public:
StreamWriter(StreamRef Stream);

Error writeBytes(ArrayRef<uint8_t> Buffer);
Error writeInteger(uint16_t Dest);
Error writeInteger(uint32_t Dest);
Error writeZeroString(StringRef Str);
Error writeFixedString(StringRef Str);
Error writeStreamRef(StreamRef Ref);
Error writeStreamRef(StreamRef Ref, uint32_t Size);

template <typename T> Error writeEnum(T Num) {
return writeInteger(
static_cast<typename std::underlying_type<T>::type>(Num));
}

template <typename T> Error writeObject(const T &Obj) {
return writeBytes(
ArrayRef<uint8_t>(reinterpret_cast<const uint8_t *>(&Obj), sizeof(T)));
}

template <typename T> Error writeArray(ArrayRef<T> Array) {
if (Array.size() == 0)
return Error::success();

if (Array.size() > UINT32_MAX / sizeof(T))
return make_error<CodeViewError>(cv_error_code::insufficient_buffer);

return writeBytes(
ArrayRef<uint8_t>(reinterpret_cast<const uint8_t *>(Array.data()),
Array.size() * sizeof(T)));
}

template <typename T, typename U>
Error writeArray(VarStreamArray<T, U> Array) {
return writeStreamRef(Array.getUnderlyingStream());
}

template <typename T> Error writeArray(FixedStreamArray<T> Array) {
return writeStreamRef(Array.getUnderlyingStream());
}

void setOffset(uint32_t Off) { Offset = Off; }
uint32_t getOffset() const { return Offset; }
uint32_t getLength() const { return Stream.getLength(); }
uint32_t bytesRemaining() const { return getLength() - getOffset(); }

private:
StreamRef Stream;
uint32_t Offset;
};
} // namespace codeview
} // namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_STREAMREADER_H
@@ -14,6 +14,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/StreamArray.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"

#include <stdint.h>

@@ -34,6 +35,8 @@ public:

virtual ArrayRef<uint8_t> getBlockData(uint32_t BlockIndex,
uint32_t NumBytes) const = 0;
virtual Error setBlockData(uint32_t BlockIndex, uint32_t Offset,
ArrayRef<uint8_t> Data) const = 0;
};
}
}
@@ -31,6 +31,9 @@ class MappedBlockStream : public codeview::StreamInterface {
public:
Error readBytes(uint32_t Offset, uint32_t Size,
ArrayRef<uint8_t> &Buffer) const override;
Error readLongestContiguousChunk(uint32_t Offset,
ArrayRef<uint8_t> &Buffer) const override;
Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Buffer) const override;

uint32_t getLength() const override;

@@ -51,8 +54,9 @@ protected:
const IPDBFile &Pdb;
std::unique_ptr<IPDBStreamData> Data;

typedef MutableArrayRef<uint8_t> CacheEntry;
mutable llvm::BumpPtrAllocator Pool;
mutable DenseMap<uint32_t, uint8_t *> CacheMap;
mutable DenseMap<uint32_t, std::vector<CacheEntry>> CacheMap;
};

} // end namespace pdb
@@ -54,6 +54,8 @@ public:

ArrayRef<uint8_t> getBlockData(uint32_t BlockIndex,
uint32_t NumBytes) const override;
Error setBlockData(uint32_t BlockIndex, uint32_t Offset,
ArrayRef<uint8_t> Data) const override;

ArrayRef<support::ulittle32_t> getDirectoryBlockArray() const;

@@ -22,7 +22,9 @@ enum class raw_error_code {
corrupt_file,
insufficient_buffer,
no_stream,
index_out_of_bounds
index_out_of_bounds,
invalid_block_address,
not_writable,
};

/// Base class for errors originating when parsing raw PDB files
@@ -681,6 +681,14 @@ inline int64_t SignExtend64(uint64_t X, unsigned B) {
return int64_t(X << (64 - B)) >> (64 - B);
}

/// \brief Subtract two unsigned integers, X and Y, of type T and return their
/// absolute value.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
AbsoluteDifference(T X, T Y) {
return std::max(X, Y) - std::min(X, Y);
}

/// \brief Add two unsigned integers, X and Y, of type T.
/// Clamp the result to the maximum representable value of T on overflow.
/// ResultOverflowed indicates if the result is larger than the maximum
@@ -15,23 +15,61 @@
using namespace llvm;
using namespace llvm::codeview;

ByteStream::ByteStream() {}
static Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Src,
ArrayRef<uint8_t> Dest) {
return make_error<CodeViewError>(cv_error_code::operation_unsupported,
"ByteStream is immutable.");
}

ByteStream::ByteStream(ArrayRef<uint8_t> Data) : Data(Data) {}
static Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Src,
MutableArrayRef<uint8_t> Dest) {
if (Dest.size() < Src.size())
return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
if (Offset > Src.size() - Dest.size())
return make_error<CodeViewError>(cv_error_code::insufficient_buffer);

ByteStream::~ByteStream() {}
::memcpy(Dest.data() + Offset, Src.data(), Src.size());
return Error::success();
}

Error ByteStream::readBytes(uint32_t Offset, uint32_t Size,
ArrayRef<uint8_t> &Buffer) const {
template <bool Writable>
Error ByteStream<Writable>::readBytes(uint32_t Offset, uint32_t Size,
ArrayRef<uint8_t> &Buffer) const {
if (Offset > Data.size())
return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
if (Data.size() < Size + Offset)
return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
Buffer = Data.slice(Offset, Size);
return Error::success();
}

uint32_t ByteStream::getLength() const { return Data.size(); }
template <bool Writable>
Error ByteStream<Writable>::readLongestContiguousChunk(
uint32_t Offset, ArrayRef<uint8_t> &Buffer) const {
if (Offset >= Data.size())
return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
Buffer = Data.slice(Offset);
return Error::success();
}

StringRef ByteStream::str() const {
template <bool Writable>
Error ByteStream<Writable>::writeBytes(uint32_t Offset,
ArrayRef<uint8_t> Buffer) const {
return ::writeBytes(Offset, Buffer, Data);
}

template <bool Writable> uint32_t ByteStream<Writable>::getLength() const {
return Data.size();
}

template <bool Writable> StringRef ByteStream<Writable>::str() const {
const char *CharData = reinterpret_cast<const char *>(Data.data());
return StringRef(CharData, Data.size());
}

namespace llvm {
namespace codeview {
template class ByteStream<true>;
template class ByteStream<false>;
}
}
@@ -11,6 +11,7 @@ add_llvm_library(LLVMDebugInfoCodeView
ModuleSubstreamVisitor.cpp
RecordSerialization.cpp
StreamReader.cpp
StreamWriter.cpp
SymbolDumper.cpp
TypeDumper.cpp
TypeRecord.cpp
@@ -31,6 +31,8 @@ public:
"bytes.";
case cv_error_code::corrupt_record:
return "The CodeView record is corrupted.";
case cv_error_code::operation_unsupported:
return "The requested operation is not supported.";
}
llvm_unreachable("Unrecognized cv_error_code");
}
@@ -17,6 +17,13 @@ using namespace llvm::codeview;

StreamReader::StreamReader(StreamRef S) : Stream(S), Offset(0) {}

Error StreamReader::readLongestContiguousChunk(ArrayRef<uint8_t> &Buffer) {
if (auto EC = Stream.readLongestContiguousChunk(Offset, Buffer))
return EC;
Offset += Buffer.size();
return Error::success();
}

Error StreamReader::readBytes(ArrayRef<uint8_t> &Buffer, uint32_t Size) {
if (auto EC = Stream.readBytes(Offset, Size, Buffer))
return EC;
lib/DebugInfo/CodeView/StreamWriter.cpp (new file, 77 lines)
@@ -0,0 +1,77 @@
//===- StreamWrite.cpp - Writes bytes and objects to a stream -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/DebugInfo/CodeView/StreamWriter.h"

#include "llvm/DebugInfo/CodeView/CodeViewError.h"
#include "llvm/DebugInfo/CodeView/StreamReader.h"
#include "llvm/DebugInfo/CodeView/StreamRef.h"

using namespace llvm;
using namespace llvm::codeview;

StreamWriter::StreamWriter(StreamRef S) : Stream(S), Offset(0) {}

Error StreamWriter::writeBytes(ArrayRef<uint8_t> Buffer) {
if (auto EC = Stream.writeBytes(Offset, Buffer))
return EC;
Offset += Buffer.size();
return Error::success();
}

Error StreamWriter::writeInteger(uint16_t Int) {
return writeObject(support::ulittle16_t(Int));
}

Error StreamWriter::writeInteger(uint32_t Int) {
return writeObject(support::ulittle32_t(Int));
}

Error StreamWriter::writeZeroString(StringRef Str) {
if (auto EC = writeFixedString(Str))
return EC;
if (auto EC = writeObject('\0'))
return EC;

return Error::success();
}

Error StreamWriter::writeFixedString(StringRef Str) {
ArrayRef<uint8_t> Bytes(Str.bytes_begin(), Str.bytes_end());
if (auto EC = Stream.writeBytes(Offset, Bytes))
return EC;

Offset += Str.size();
return Error::success();
}

Error StreamWriter::writeStreamRef(StreamRef Ref) {
if (auto EC = writeStreamRef(Ref, Ref.getLength()))
return EC;
Offset += Ref.getLength();
return Error::success();
}

Error StreamWriter::writeStreamRef(StreamRef Ref, uint32_t Length) {
Ref = Ref.slice(0, Length);

StreamReader SrcReader(Ref);
// This is a bit tricky. If we just call readBytes, we are requiring that it
// return us the entire stream as a contiguous buffer. For large streams this
// will allocate a huge amount of space from the pool. Instead, iterate over
// each contiguous chunk until we've consumed the entire stream.
while (SrcReader.bytesRemaining() > 0) {
ArrayRef<uint8_t> Chunk;
if (auto EC = SrcReader.readLongestContiguousChunk(Chunk))
return EC;
if (auto EC = writeBytes(Chunk))
return EC;
}
return Error::success();
}
@@ -692,7 +692,7 @@ bool CVTypeDumper::dump(const CVTypeArray &Types) {
}

bool CVTypeDumper::dump(ArrayRef<uint8_t> Data) {
ByteStream Stream(Data);
ByteStream<> Stream(Data);
CVTypeArray Types;
StreamReader Reader(Stream);
if (auto EC = Reader.readArray(Types, Reader.getLength())) {
@@ -29,6 +29,12 @@ public:
};
}

typedef std::pair<uint32_t, uint32_t> Interval;
static Interval intersect(const Interval &I1, const Interval &I2) {
return std::make_pair(std::max(I1.first, I2.first),
std::min(I1.second, I2.second));
}

MappedBlockStream::MappedBlockStream(std::unique_ptr<IPDBStreamData> Data,
const IPDBFile &Pdb)
: Pdb(Pdb), Data(std::move(Data)) {}
@@ -46,26 +52,99 @@ Error MappedBlockStream::readBytes(uint32_t Offset, uint32_t Size,

auto CacheIter = CacheMap.find(Offset);
if (CacheIter != CacheMap.end()) {
// In a more general solution, we would need to guarantee that the
// cached allocation is at least the requested size. In practice, since
// these are CodeView / PDB records, we know they are always formatted
// the same way and never change, so we should never be requesting two
// allocations from the same address with different sizes.
Buffer = ArrayRef<uint8_t>(CacheIter->second, Size);
// Try to find an alloc that was large enough for this request.
for (auto &Entry : CacheIter->second) {
if (Entry.size() >= Size) {
Buffer = Entry.slice(0, Size);
return Error::success();
}
}
}

// We couldn't find a buffer that started at the correct offset (the most
// common scenario). Try to see if there is a buffer that starts at some
// other offset but overlaps the desired range.
for (auto &CacheItem : CacheMap) {
Interval RequestExtent = std::make_pair(Offset, Offset + Size);

// We already checked this one on the fast path above.
if (CacheItem.first == Offset)
continue;
// If the initial extent of the cached item is beyond the ending extent
// of the request, there is no overlap.
if (CacheItem.first >= Offset + Size)
continue;

// We really only have to check the last item in the list, since we append
// in order of increasing length.
if (CacheItem.second.empty())
continue;

auto CachedAlloc = CacheItem.second.back();
// If the initial extent of the request is beyond the ending extent of
// the cached item, there is no overlap.
Interval CachedExtent =
std::make_pair(CacheItem.first, CacheItem.first + CachedAlloc.size());
if (RequestExtent.first >= CachedExtent.first + CachedExtent.second)
continue;

Interval Intersection = intersect(CachedExtent, RequestExtent);
// Only use this if the entire request extent is contained in the cached
// extent.
if (Intersection != RequestExtent)
continue;

uint32_t CacheRangeOffset =
AbsoluteDifference(CachedExtent.first, Intersection.first);
Buffer = CachedAlloc.slice(CacheRangeOffset, Size);
return Error::success();
}

// Otherwise allocate a large enough buffer in the pool, memcpy the data
// into it, and return an ArrayRef to that.
// into it, and return an ArrayRef to that. Do not touch existing pool
// allocations, as existing clients may be holding a pointer which must
// not be invalidated.
uint8_t *WriteBuffer = Pool.Allocate<uint8_t>(Size);

if (auto EC = readBytes(Offset, MutableArrayRef<uint8_t>(WriteBuffer, Size)))
return EC;
CacheMap.insert(std::make_pair(Offset, WriteBuffer));

if (CacheIter != CacheMap.end()) {
CacheIter->second.emplace_back(WriteBuffer, Size);
} else {
std::vector<CacheEntry> List;
List.emplace_back(WriteBuffer, Size);
CacheMap.insert(std::make_pair(Offset, List));
}
Buffer = ArrayRef<uint8_t>(WriteBuffer, Size);
return Error::success();
}

Error MappedBlockStream::readLongestContiguousChunk(
uint32_t Offset, ArrayRef<uint8_t> &Buffer) const {
// Make sure we aren't trying to read beyond the end of the stream.
if (Offset >= Data->getLength())
return make_error<RawError>(raw_error_code::insufficient_buffer);
uint32_t First = Offset / Pdb.getBlockSize();
uint32_t Last = First;

auto BlockList = Data->getStreamBlocks();
while (Last < Pdb.getBlockCount() - 1) {
if (BlockList[Last] != BlockList[Last + 1] - 1)
break;
++Last;
}

uint32_t OffsetInFirstBlock = Offset % Pdb.getBlockSize();
uint32_t BytesFromFirstBlock = Pdb.getBlockSize() - OffsetInFirstBlock;
uint32_t BlockSpan = Last - First + 1;
uint32_t ByteSpan =
BytesFromFirstBlock + (BlockSpan - 1) * Pdb.getBlockSize();
Buffer = Pdb.getBlockData(BlockList[First], Pdb.getBlockSize());
Buffer = Buffer.drop_front(OffsetInFirstBlock);
Buffer = ArrayRef<uint8_t>(Buffer.data(), ByteSpan);
return Error::success();
}

uint32_t MappedBlockStream::getLength() const { return Data->getLength(); }

bool MappedBlockStream::tryReadContiguously(uint32_t Offset, uint32_t Size,
@@ -130,7 +209,73 @@ Error MappedBlockStream::readBytes(uint32_t Offset,
}

return Error::success();
}

Error MappedBlockStream::writeBytes(uint32_t Offset,
ArrayRef<uint8_t> Buffer) const {
// Make sure we aren't trying to write beyond the end of the stream.
if (Buffer.size() > Data->getLength())
return make_error<RawError>(raw_error_code::insufficient_buffer);

if (Offset > Data->getLength() - Buffer.size())
return make_error<RawError>(raw_error_code::insufficient_buffer);

uint32_t BlockNum = Offset / Pdb.getBlockSize();
uint32_t OffsetInBlock = Offset % Pdb.getBlockSize();

uint32_t BytesLeft = Buffer.size();
auto BlockList = Data->getStreamBlocks();
uint32_t BytesWritten = 0;
while (BytesLeft > 0) {
uint32_t StreamBlockAddr = BlockList[BlockNum];
uint32_t BytesToWriteInChunk =
std::min(BytesLeft, Pdb.getBlockSize() - OffsetInBlock);

const uint8_t *Chunk = Buffer.data() + BytesWritten;
ArrayRef<uint8_t> ChunkData(Chunk, BytesToWriteInChunk);
if (auto EC = Pdb.setBlockData(StreamBlockAddr, OffsetInBlock, ChunkData))
return EC;

BytesLeft -= BytesToWriteInChunk;
BytesWritten += BytesToWriteInChunk;
++BlockNum;
OffsetInBlock = 0;
}

// If this write overlapped a read which previously came from the pool,
// someone may still be holding a pointer to that alloc which is now invalid.
// Compute the overlapping range and update the cache entry, so any
// outstanding buffers are automatically updated.
for (const auto &MapEntry : CacheMap) {
// If the end of the written extent precedes the beginning of the cached
// extent, ignore this map entry.
if (Offset + BytesWritten < MapEntry.first)
continue;
for (const auto &Alloc : MapEntry.second) {
// If the end of the cached extent precedes the beginning of the written
// extent, ignore this alloc.
if (MapEntry.first + Alloc.size() < Offset)
continue;

// If we get here, they are guaranteed to overlap.
Interval WriteInterval = std::make_pair(Offset, Offset + BytesWritten);
Interval CachedInterval =
std::make_pair(MapEntry.first, MapEntry.first + Alloc.size());
// If they overlap, we need to write the new data into the overlapping
// range.
auto Intersection = intersect(WriteInterval, CachedInterval);
assert(Intersection.first <= Intersection.second);

uint32_t Length = Intersection.second - Intersection.first;
uint32_t SrcOffset =
AbsoluteDifference(WriteInterval.first, Intersection.first);
uint32_t DestOffset =
AbsoluteDifference(CachedInterval.first, Intersection.first);
::memcpy(Alloc.data() + DestOffset, Buffer.data() + SrcOffset, Length);
}
}

return Error::success();
}

uint32_t MappedBlockStream::getNumBytesCopied() const {
@@ -136,6 +136,17 @@ ArrayRef<uint8_t> PDBFile::getBlockData(uint32_t BlockIndex,
NumBytes);
}

Error PDBFile::setBlockData(uint32_t BlockIndex, uint32_t Offset,
ArrayRef<uint8_t> Data) const {
if (BlockIndex >= getBlockCount())
return make_error<RawError>(raw_error_code::invalid_block_address);

if (Offset > getBlockSize() - Data.size())
return make_error<RawError>(raw_error_code::insufficient_buffer);

return make_error<RawError>(raw_error_code::not_writable);
}

Error PDBFile::parseFileHeaders() {
std::error_code EC;
MemoryBufferRef BufferRef = *Context->Buffer;
@@ -28,6 +28,10 @@ public:
return "The specified stream could not be loaded.";
case raw_error_code::index_out_of_bounds:
return "The specified item does not exist in the array.";
case raw_error_code::invalid_block_address:
return "The specified block address is not valid.";
case raw_error_code::not_writable:
return "The PDB does not support writing.";
}
llvm_unreachable("Unrecognized raw_error_code");
}
@@ -969,7 +969,7 @@ void COFFDumper::printCodeViewSymbolsSubsection(StringRef Subsection,
SectionContents);

CVSymbolDumper CVSD(W, CVTD, std::move(CODD), opts::CodeViewSubsectionBytes);
ByteStream Stream(BinaryData);
ByteStream<> Stream(BinaryData);
CVSymbolArray Symbols;
StreamReader Reader(Stream);
if (auto EC = Reader.readArray(Symbols, Reader.getLength())) {
@@ -1077,7 +1077,7 @@ void COFFDumper::mergeCodeViewTypes(MemoryTypeTableBuilder &CVTypes) {
error(object_error::parse_failed);
ArrayRef<uint8_t> Bytes(reinterpret_cast<const uint8_t *>(Data.data()),
Data.size());
ByteStream Stream(Bytes);
ByteStream<> Stream(Bytes);
CVTypeArray Types;
StreamReader Reader(Stream);
if (auto EC = Reader.readArray(Types, Reader.getLength())) {
@@ -9,8 +9,10 @@

#include <unordered_map>

#include "llvm/DebugInfo/CodeView/ByteStream.h"
#include "llvm/DebugInfo/CodeView/StreamReader.h"
#include "llvm/DebugInfo/CodeView/StreamRef.h"
#include "llvm/DebugInfo/CodeView/StreamWriter.h"
#include "llvm/DebugInfo/PDB/Raw/IPDBFile.h"
#include "llvm/DebugInfo/PDB/Raw/IPDBStreamData.h"
#include "llvm/DebugInfo/PDB/Raw/IndexedStreamData.h"
@@ -40,35 +42,43 @@ namespace {
}

static const uint32_t BlocksAry[] = {0, 1, 2, 5, 4, 3, 6, 7, 8, 9};
static const char DataAry[] = {'A', 'B', 'C', 'F', 'E',
'D', 'G', 'H', 'I', 'J'};
static uint8_t DataAry[] = {'A', 'B', 'C', 'F', 'E', 'D', 'G', 'H', 'I', 'J'};

class DiscontiguousFile : public IPDBFile {
public:
DiscontiguousFile()
: Blocks(std::begin(BlocksAry), std::end(BlocksAry)),
Data(std::begin(DataAry), std::end(DataAry)) {}
DiscontiguousFile(ArrayRef<uint32_t> Blocks, MutableArrayRef<uint8_t> Data)
: Blocks(Blocks.begin(), Blocks.end()), Data(Data.begin(), Data.end()) {}

virtual uint32_t getBlockSize() const override { return 1; }
virtual uint32_t getBlockCount() const override { return 10; }
virtual uint32_t getNumStreams() const override { return 1; }
virtual uint32_t getStreamByteSize(uint32_t StreamIndex) const override {
uint32_t getBlockSize() const override { return 1; }
uint32_t getBlockCount() const override { return Blocks.size(); }
uint32_t getNumStreams() const override { return 1; }
uint32_t getStreamByteSize(uint32_t StreamIndex) const override {
return getBlockCount() * getBlockSize();
}
virtual ArrayRef<support::ulittle32_t>
ArrayRef<support::ulittle32_t>
getStreamBlockList(uint32_t StreamIndex) const override {
if (StreamIndex != 0)
return ArrayRef<support::ulittle32_t>();
return Blocks;
}
virtual ArrayRef<uint8_t> getBlockData(uint32_t BlockIndex,
uint32_t NumBytes) const override {
ArrayRef<uint8_t> getBlockData(uint32_t BlockIndex,
uint32_t NumBytes) const override {
return ArrayRef<uint8_t>(&Data[BlockIndex], NumBytes);
}

Error setBlockData(uint32_t BlockIndex, uint32_t Offset,
ArrayRef<uint8_t> SrcData) const override {
if (BlockIndex >= Blocks.size())
return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
if (Offset > getBlockSize() - SrcData.size())
return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
::memcpy(&Data[BlockIndex] + Offset, SrcData.data(), SrcData.size());
return Error::success();
}

private:
std::vector<support::ulittle32_t> Blocks;
std::vector<uint8_t> Data;
MutableArrayRef<uint8_t> Data;
};

class MappedBlockStreamImpl : public MappedBlockStream {
@@ -81,7 +91,7 @@ public:
// Tests that a read which is entirely contained within a single block works
// and does not allocate.
TEST(MappedBlockStreamTest, ReadBeyondEndOfStreamRef) {
DiscontiguousFile F;
DiscontiguousFile F(BlocksAry, DataAry);
MappedBlockStreamImpl S(llvm::make_unique<IndexedStreamData>(0, F), F);
StreamReader R(S);
StreamRef SR;
@@ -95,7 +105,7 @@ TEST(MappedBlockStreamTest, ReadBeyondEndOfStreamRef) {
// Tests that a read which outputs into a full destination buffer works and
// does not fail due to the length of the output buffer.
TEST(MappedBlockStreamTest, ReadOntoNonEmptyBuffer) {
DiscontiguousFile F;
DiscontiguousFile F(BlocksAry, DataAry);
MappedBlockStreamImpl S(llvm::make_unique<IndexedStreamData>(0, F), F);
StreamReader R(S);
StringRef Str = "ZYXWVUTSRQPONMLKJIHGFEDCBA";
@@ -108,7 +118,7 @@ TEST(MappedBlockStreamTest, ReadOntoNonEmptyBuffer) {
// blocks are still contiguous in memory to the previous block works and does
// not allocate memory.
TEST(MappedBlockStreamTest, ZeroCopyReadContiguousBreak) {
DiscontiguousFile F;
DiscontiguousFile F(BlocksAry, DataAry);
MappedBlockStreamImpl S(llvm::make_unique<IndexedStreamData>(0, F), F);
StreamReader R(S);
StringRef Str;
@@ -126,7 +136,7 @@ TEST(MappedBlockStreamTest, ZeroCopyReadContiguousBreak) {
// contiguously works and allocates only the precise amount of bytes
// requested.
TEST(MappedBlockStreamTest, CopyReadNonContiguousBreak) {
DiscontiguousFile F;
DiscontiguousFile F(BlocksAry, DataAry);
MappedBlockStreamImpl S(llvm::make_unique<IndexedStreamData>(0, F), F);
StreamReader R(S);
StringRef Str;
@@ -138,7 +148,7 @@ TEST(MappedBlockStreamTest, CopyReadNonContiguousBreak) {
// Test that an out of bounds read which doesn't cross a block boundary
// fails and allocates no memory.
TEST(MappedBlockStreamTest, InvalidReadSizeNoBreak) {
DiscontiguousFile F;
DiscontiguousFile F(BlocksAry, DataAry);
MappedBlockStreamImpl S(llvm::make_unique<IndexedStreamData>(0, F), F);
StreamReader R(S);
StringRef Str;
@@ -151,7 +161,7 @@ TEST(MappedBlockStreamTest, InvalidReadSizeNoBreak) {
// Test that an out of bounds read which crosses a contiguous block boundary
// fails and allocates no memory.
TEST(MappedBlockStreamTest, InvalidReadSizeContiguousBreak) {
DiscontiguousFile F;
DiscontiguousFile F(BlocksAry, DataAry);
MappedBlockStreamImpl S(llvm::make_unique<IndexedStreamData>(0, F), F);
StreamReader R(S);
StringRef Str;
@@ -164,7 +174,7 @@ TEST(MappedBlockStreamTest, InvalidReadSizeContiguousBreak) {
// Test that an out of bounds read which crosses a discontiguous block
// boundary fails and allocates no memory.
TEST(MappedBlockStreamTest, InvalidReadSizeNonContiguousBreak) {
DiscontiguousFile F;
DiscontiguousFile F(BlocksAry, DataAry);
MappedBlockStreamImpl S(llvm::make_unique<IndexedStreamData>(0, F), F);
StreamReader R(S);
StringRef Str;
@@ -176,7 +186,7 @@ TEST(MappedBlockStreamTest, InvalidReadSizeNonContiguousBreak) {
// Tests that a read which is entirely contained within a single block but
// beyond the end of a StreamRef fails.
TEST(MappedBlockStreamTest, ZeroCopyReadNoBreak) {
DiscontiguousFile F;
DiscontiguousFile F(BlocksAry, DataAry);
MappedBlockStreamImpl S(llvm::make_unique<IndexedStreamData>(0, F), F);
StreamReader R(S);
StringRef Str;
@@ -185,4 +195,257 @@ TEST(MappedBlockStreamTest, ZeroCopyReadNoBreak) {
EXPECT_EQ(0U, S.getNumBytesCopied());
}

// Tests that a read which is not aligned on the same boundary as a previous
// cached request, but which is known to overlap that request, shares the
// previous allocation.
TEST(MappedBlockStreamTest, UnalignedOverlappingRead) {
DiscontiguousFile F(BlocksAry, DataAry);
MappedBlockStreamImpl S(llvm::make_unique<IndexedStreamData>(0, F), F);
StreamReader R(S);
StringRef Str1;
StringRef Str2;
EXPECT_NO_ERROR(R.readFixedString(Str1, 7));
EXPECT_EQ(Str1, StringRef("ABCDEFG"));
EXPECT_EQ(7U, S.getNumBytesCopied());

R.setOffset(2);
EXPECT_NO_ERROR(R.readFixedString(Str2, 3));
EXPECT_EQ(Str2, StringRef("CDE"));
EXPECT_EQ(Str1.data() + 2, Str2.data());
EXPECT_EQ(7U, S.getNumBytesCopied());
}

// Tests that a read which is not aligned on the same boundary as a previous
// cached request, but which only partially overlaps a previous cached request,
// still works correctly and allocates again from the shared pool.
TEST(MappedBlockStreamTest, UnalignedOverlappingReadFail) {
DiscontiguousFile F(BlocksAry, DataAry);
MappedBlockStreamImpl S(llvm::make_unique<IndexedStreamData>(0, F), F);
StreamReader R(S);
StringRef Str1;
StringRef Str2;
EXPECT_NO_ERROR(R.readFixedString(Str1, 6));
EXPECT_EQ(Str1, StringRef("ABCDEF"));
EXPECT_EQ(6U, S.getNumBytesCopied());

R.setOffset(4);
EXPECT_NO_ERROR(R.readFixedString(Str2, 4));
EXPECT_EQ(Str2, StringRef("EFGH"));
EXPECT_EQ(10U, S.getNumBytesCopied());
}

TEST(MappedBlockStreamTest, WriteBeyondEndOfStream) {
static uint8_t Data[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'};
static uint8_t LargeBuffer[] = {'0', '1', '2', '3', '4', '5',
'6', '7', '8', '9', 'A'};
static uint8_t SmallBuffer[] = {'0', '1', '2'};
static_assert(sizeof(LargeBuffer) > sizeof(Data),
"LargeBuffer is not big enough");

DiscontiguousFile F(BlocksAry, Data);
MappedBlockStreamImpl S(llvm::make_unique<IndexedStreamData>(0, F), F);
ArrayRef<uint8_t> Buffer;

EXPECT_ERROR(S.writeBytes(0, ArrayRef<uint8_t>(LargeBuffer)));
EXPECT_NO_ERROR(S.writeBytes(0, ArrayRef<uint8_t>(SmallBuffer)));
EXPECT_NO_ERROR(S.writeBytes(7, ArrayRef<uint8_t>(SmallBuffer)));
EXPECT_ERROR(S.writeBytes(8, ArrayRef<uint8_t>(SmallBuffer)));
}

TEST(MappedBlockStreamTest, TestWriteBytesNoBreakBoundary) {
static uint8_t Data[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'};
DiscontiguousFile F(BlocksAry, Data);
MappedBlockStreamImpl S(llvm::make_unique<IndexedStreamData>(0, F), F);
ArrayRef<uint8_t> Buffer;

EXPECT_NO_ERROR(S.readBytes(0, 1, Buffer));
EXPECT_EQ(Buffer, ArrayRef<uint8_t>('A'));
EXPECT_NO_ERROR(S.readBytes(9, 1, Buffer));
EXPECT_EQ(Buffer, ArrayRef<uint8_t>('J'));

EXPECT_NO_ERROR(S.writeBytes(0, ArrayRef<uint8_t>('J')));
EXPECT_NO_ERROR(S.writeBytes(9, ArrayRef<uint8_t>('A')));

EXPECT_NO_ERROR(S.readBytes(0, 1, Buffer));
EXPECT_EQ(Buffer, ArrayRef<uint8_t>('J'));
EXPECT_NO_ERROR(S.readBytes(9, 1, Buffer));
EXPECT_EQ(Buffer, ArrayRef<uint8_t>('A'));

EXPECT_NO_ERROR(S.writeBytes(0, ArrayRef<uint8_t>('A')));
EXPECT_NO_ERROR(S.writeBytes(9, ArrayRef<uint8_t>('J')));

EXPECT_NO_ERROR(S.readBytes(0, 1, Buffer));
EXPECT_EQ(Buffer, ArrayRef<uint8_t>('A'));
EXPECT_NO_ERROR(S.readBytes(9, 1, Buffer));
EXPECT_EQ(Buffer, ArrayRef<uint8_t>('J'));
}

TEST(MappedBlockStreamTest, TestWriteBytesBreakBoundary) {
static uint8_t Data[] = {'0', '0', '0', '0', '0', '0', '0', '0', '0', '0'};
static uint8_t TestData[] = {'T', 'E', 'S', 'T', 'I', 'N', 'G', '.'};
static uint8_t Expected[] = {'T', 'E', 'S', 'N', 'I',
'T', 'G', '.', '0', '0'};

DiscontiguousFile F(BlocksAry, Data);
MappedBlockStreamImpl S(llvm::make_unique<IndexedStreamData>(0, F), F);
ArrayRef<uint8_t> Buffer;

EXPECT_NO_ERROR(S.writeBytes(0, TestData));
// First just compare the memory, then compare the result of reading the
// string out.
EXPECT_EQ(ArrayRef<uint8_t>(Data), ArrayRef<uint8_t>(Expected));

EXPECT_NO_ERROR(S.readBytes(0, 8, Buffer));
EXPECT_EQ(Buffer, ArrayRef<uint8_t>(TestData));
}

TEST(MappedBlockStreamTest, TestWriteThenRead) {
std::vector<uint8_t> DataBytes(10);
MutableArrayRef<uint8_t> Data(DataBytes);
const uint32_t Blocks[] = {2, 1, 0, 6, 3, 4, 5, 7, 9, 8};

DiscontiguousFile F(Blocks, Data);
MappedBlockStreamImpl S(llvm::make_unique<IndexedStreamData>(0, F), F);

enum class MyEnum : uint32_t { Val1 = 2908234, Val2 = 120891234 };

uint16_t u16[] = {31468, 0};
uint32_t u32[] = {890723408, 0};
MyEnum Enum[] = {MyEnum::Val1, MyEnum::Val2};
StringRef ZStr[] = {"Zero Str", ""};
StringRef FStr[] = {"Fixed Str", ""};
ArrayRef<uint8_t> byteArray[] = {{'1', '2'}, {'0', '0'}};
ArrayRef<uint32_t> intArray[] = {{890723408, 29082234}, {0, 0}};

StreamReader Reader(S);
StreamWriter Writer(S);
EXPECT_NO_ERROR(Writer.writeInteger(u16[0]));
EXPECT_NO_ERROR(Reader.readInteger(u16[1]));
EXPECT_EQ(u16[0], u16[1]);
EXPECT_EQ(DataBytes,
std::vector<uint8_t>({0, 0x7A, 0xEC, 0, 0, 0, 0, 0, 0, 0}));

Reader.setOffset(0);
Writer.setOffset(0);
::memset(DataBytes.data(), 0, 10);
EXPECT_NO_ERROR(Writer.writeInteger(u32[0]));
EXPECT_NO_ERROR(Reader.readInteger(u32[1]));
EXPECT_EQ(u32[0], u32[1]);
EXPECT_EQ(DataBytes,
std::vector<uint8_t>({0x17, 0x5C, 0x50, 0, 0, 0, 0x35, 0, 0, 0}));

Reader.setOffset(0);
Writer.setOffset(0);
::memset(DataBytes.data(), 0, 10);
EXPECT_NO_ERROR(Writer.writeEnum(Enum[0]));
EXPECT_NO_ERROR(Reader.readEnum(Enum[1]));
EXPECT_EQ(Enum[0], Enum[1]);
EXPECT_EQ(DataBytes,
std::vector<uint8_t>({0x2C, 0x60, 0x4A, 0, 0, 0, 0, 0, 0, 0}));

Reader.setOffset(0);
Writer.setOffset(0);
::memset(DataBytes.data(), 0, 10);
EXPECT_NO_ERROR(Writer.writeZeroString(ZStr[0]));
EXPECT_NO_ERROR(Reader.readZeroString(ZStr[1]));
EXPECT_EQ(ZStr[0], ZStr[1]);
EXPECT_EQ(DataBytes, std::vector<uint8_t>(
{'r', 'e', 'Z', ' ', 'S', 't', 'o', 'r', 0, 0}));

Reader.setOffset(0);
Writer.setOffset(0);
::memset(DataBytes.data(), 0, 10);
EXPECT_NO_ERROR(Writer.writeFixedString(FStr[0]));
EXPECT_NO_ERROR(Reader.readFixedString(FStr[1], FStr[0].size()));
EXPECT_EQ(FStr[0], FStr[1]);
EXPECT_EQ(DataBytes, std::vector<uint8_t>(
{'x', 'i', 'F', 'd', ' ', 'S', 'e', 't', 0, 'r'}));

Reader.setOffset(0);
Writer.setOffset(0);
::memset(DataBytes.data(), 0, 10);
EXPECT_NO_ERROR(Writer.writeArray(byteArray[0]));
EXPECT_NO_ERROR(Reader.readArray(byteArray[1], byteArray[0].size()));
EXPECT_EQ(byteArray[0], byteArray[1]);
EXPECT_EQ(DataBytes,
std::vector<uint8_t>({0, 0x32, 0x31, 0, 0, 0, 0, 0, 0, 0}));

Reader.setOffset(0);
Writer.setOffset(0);
::memset(DataBytes.data(), 0, 10);
EXPECT_NO_ERROR(Writer.writeArray(intArray[0]));
EXPECT_NO_ERROR(Reader.readArray(intArray[1], intArray[0].size()));
EXPECT_EQ(intArray[0], intArray[1]);
EXPECT_EQ(DataBytes, std::vector<uint8_t>({0x17, 0x5C, 0x50, 0x7A, 0xC2, 0xBB,
0x35, 0x01, 0, 0}));
}

TEST(MappedBlockStreamTest, TestWriteContiguousStreamRef) {
std::vector<uint8_t> DestDataBytes(10);
MutableArrayRef<uint8_t> DestData(DestDataBytes);
const uint32_t DestBlocks[] = {2, 1, 0, 6, 3, 4, 5, 7, 9, 8};

std::vector<uint8_t> SrcDataBytes(10);
MutableArrayRef<uint8_t> SrcData(SrcDataBytes);

DiscontiguousFile F(DestBlocks, DestData);
MappedBlockStreamImpl DestStream(llvm::make_unique<IndexedStreamData>(0, F),
F);

// First write "Test Str" into the source stream.
ByteStream<true> SourceStream(SrcData);
StreamWriter SourceWriter(SourceStream);
EXPECT_NO_ERROR(SourceWriter.writeZeroString("Test Str"));
EXPECT_EQ(SrcDataBytes, std::vector<uint8_t>(
{'T', 'e', 's', 't', ' ', 'S', 't', 'r', 0, 0}));

// Then write the source stream into the dest stream.
StreamWriter DestWriter(DestStream);
EXPECT_NO_ERROR(DestWriter.writeStreamRef(SourceStream));
EXPECT_EQ(DestDataBytes, std::vector<uint8_t>(
{'s', 'e', 'T', ' ', 'S', 't', 't', 'r', 0, 0}));

// Then read the string back out of the dest stream.
StringRef Result;
StreamReader DestReader(DestStream);
EXPECT_NO_ERROR(DestReader.readZeroString(Result));
EXPECT_EQ(Result, "Test Str");
}

TEST(MappedBlockStreamTest, TestWriteDiscontiguousStreamRef) {
std::vector<uint8_t> DestDataBytes(10);
MutableArrayRef<uint8_t> DestData(DestDataBytes);
const uint32_t DestBlocks[] = {2, 1, 0, 6, 3, 4, 5, 7, 9, 8};

std::vector<uint8_t> SrcDataBytes(10);
MutableArrayRef<uint8_t> SrcData(SrcDataBytes);
const uint32_t SrcBlocks[] = {1, 0, 6, 3, 4, 5, 2, 7, 8, 9};

DiscontiguousFile DestFile(DestBlocks, DestData);
DiscontiguousFile SrcFile(SrcBlocks, SrcData);

MappedBlockStreamImpl DestStream(
llvm::make_unique<IndexedStreamData>(0, DestFile), DestFile);
MappedBlockStreamImpl SrcStream(
llvm::make_unique<IndexedStreamData>(0, SrcFile), SrcFile);

// First write "Test Str" into the source stream.
StreamWriter SourceWriter(SrcStream);
EXPECT_NO_ERROR(SourceWriter.writeZeroString("Test Str"));
EXPECT_EQ(SrcDataBytes, std::vector<uint8_t>(
{'e', 'T', 't', 't', ' ', 'S', 's', 'r', 0, 0}));

// Then write the source stream into the dest stream.
StreamWriter DestWriter(DestStream);
EXPECT_NO_ERROR(DestWriter.writeStreamRef(SrcStream));
EXPECT_EQ(DestDataBytes, std::vector<uint8_t>(
{'s', 'e', 'T', ' ', 'S', 't', 't', 'r', 0, 0}));

// Then read the string back out of the dest stream.
StringRef Result;
StreamReader DestReader(DestStream);
EXPECT_NO_ERROR(DestReader.readZeroString(Result));
EXPECT_EQ(Result, "Test Str");
}

} // end anonymous namespace