Initial commit

Joelrau 2021-01-01 09:28:48 +02:00
commit 2ab2ec9a07
5049 changed files with 1635415 additions and 0 deletions

150
.gitignore vendored Normal file

@@ -0,0 +1,150 @@
### Windows
# Windows image file caches
Thumbs.db
ehthumbs.db
# Folder config file
Desktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msm
*.msp
# Shortcuts
*.lnk
### OSX
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear on external disk
.Spotlight-V100
.Trashes
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
### Visual Studio
# User-specific files
*.suo
*.user
*.userosscache
*.sln.docstates
# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs
# Build results
build
# Visual Studio 2015 cache/options directory
.vs/
# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*
*_i.c
*_p.c
*_i.h
*.ilk
*.meta
*.obj
*.pch
*.pdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*.log
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc
# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap
# TFS 2012 Local Workspace
$tf/
# Guidance Automation Toolkit
*.gpState
# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!*.[Cc]ache/
# Others
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.pfx
*.publishsettings
# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
# SQL Server files
*.mdf
*.ldf
### IDA
*.id0
*.id1
*.id2
*.nam
*.til
### Custom user files
# User scripts
user*.bat
# Premake binary
#premake5.exe

34
.gitmodules vendored Normal file

@@ -0,0 +1,34 @@
[submodule "deps/GSL"]
path = deps/GSL
url = https://github.com/Microsoft/GSL.git
[submodule "deps/asmjit"]
path = deps/asmjit
url = https://github.com/asmjit/asmjit.git
[submodule "deps/rapidjson"]
path = deps/rapidjson
url = https://github.com/Tencent/rapidjson.git
[submodule "deps/udis86"]
path = deps/udis86
url = https://github.com/vmt/udis86.git
[submodule "deps/protobuf"]
path = deps/protobuf
url = https://github.com/google/protobuf.git
branch = 3.14.x
[submodule "deps/minhook"]
path = deps/minhook
url = https://github.com/TsudaKageyu/minhook.git
[submodule "deps/libtomcrypt"]
path = deps/libtomcrypt
url = https://github.com/libtom/libtomcrypt.git
branch = develop
[submodule "deps/libtommath"]
path = deps/libtommath
url = https://github.com/libtom/libtommath.git
branch = develop
[submodule "deps/zlib"]
path = deps/zlib
url = https://github.com/madler/zlib.git
branch = develop
[submodule "deps/discord-rpc"]
path = deps/discord-rpc
url = https://github.com/discord/discord-rpc.git

1
README.md Normal file

@@ -0,0 +1,1 @@
# s1-mod

34
deps/GSL/.clang-format vendored Normal file

@@ -0,0 +1,34 @@
ColumnLimit: 100
UseTab: Never
IndentWidth: 4
AccessModifierOffset: -4
NamespaceIndentation: Inner
BreakBeforeBraces: Custom
BraceWrapping:
AfterNamespace: true
AfterEnum: true
AfterStruct: true
AfterClass: true
SplitEmptyFunction: false
AfterControlStatement: true
AfterFunction: true
AfterUnion: true
BeforeElse: true
AlwaysBreakTemplateDeclarations: true
BreakConstructorInitializersBeforeComma: true
ConstructorInitializerAllOnOneLineOrOnePerLine: true
AllowShortBlocksOnASingleLine: true
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: true
AllowShortLoopsOnASingleLine: true
PointerAlignment: Left
AlignConsecutiveAssignments: false
AlignTrailingComments: true
SpaceAfterCStyleCast: true
CommentPragmas: '^ NO-FORMAT:'

50
deps/GSL/.github/workflows/android.yml vendored Normal file

@@ -0,0 +1,50 @@
name: CI_Android
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
Android:
runs-on: macos-latest
defaults:
run:
working-directory: build
steps:
- uses: actions/checkout@v2
- name: Create build directory
run: mkdir -p build
working-directory: .
- name: Start emulator
run: |
echo "y" | $ANDROID_HOME/tools/bin/sdkmanager --install 'system-images;android-24;default;x86_64'
echo "no" | $ANDROID_HOME/tools/bin/avdmanager create avd -n xamarin_android_emulator -k 'system-images;android-24;default;x86_64' --force
$ANDROID_HOME/emulator/emulator -list-avds
echo "Starting emulator"
# Start emulator in background
nohup $ANDROID_HOME/emulator/emulator -avd xamarin_android_emulator -no-snapshot > /dev/null 2>&1 &
echo "Emulator starting"
- name: Configure
run: cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_HOME/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_PLATFORM=16 -DANDROID_ABI=x86_64 -DCMAKE_BUILD_TYPE=Debug ..
- name: Build
run: cmake --build . --parallel
- name: Wait for emulator ready
run: |
$ANDROID_HOME/platform-tools/adb wait-for-device shell 'while [[ -z $(getprop sys.boot_completed | tr -d '\r') ]]; do sleep 10; done; input keyevent 82'
$ANDROID_HOME/platform-tools/adb devices
$ANDROID_HOME/platform-tools/adb shell getprop ro.product.cpu.abi
echo "Emulator started"
- name: Deploy tests
run: |
adb push tests /data/local/tmp
adb shell find /data/local/tmp/tests -maxdepth 1 -exec chmod +x {} \\\;
- name: Test
run: adb shell find /data/local/tmp/tests -name "*_tests" -maxdepth 1 -exec {} \\\;

51
deps/GSL/.github/workflows/ios.yml vendored Normal file

@@ -0,0 +1,51 @@
name: CI_iOS
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
iOS:
runs-on: macos-latest
defaults:
run:
working-directory: build
steps:
- uses: actions/checkout@v2
- name: Create build directory
run: mkdir -p build
working-directory: .
- name: Configure
run: |
cmake \
-GXcode \
-DCMAKE_SYSTEM_NAME=iOS \
"-DCMAKE_OSX_ARCHITECTURES=arm64;x86_64" \
-DCMAKE_OSX_DEPLOYMENT_TARGET=8 \
-DCMAKE_TRY_COMPILE_TARGET_TYPE=STATIC_LIBRARY \
"-DMACOSX_BUNDLE_GUI_IDENTIFIER=GSL.\$(EXECUTABLE_NAME)" \
-DMACOSX_BUNDLE_BUNDLE_VERSION=3.0.1 \
-DMACOSX_BUNDLE_SHORT_VERSION_STRING=3.0.1 \
..
- name: Build
run: cmake --build . --parallel `sysctl -n hw.ncpu` --config Release -- -sdk iphonesimulator
- name: Start simulator
run: |
RUNTIME=`xcrun simctl list runtimes iOS -j|jq '.runtimes|last.identifier'`
UDID=`xcrun simctl list devices iPhone available -j|jq -r ".devices[$RUNTIME]|last.udid"`
xcrun simctl bootstatus $UDID -b
- name: Test
run: |
for TEST in `find tests/Release-iphonesimulator -depth 1 -name "*.app"`
do
xcrun simctl install booted $TEST
TEST_ID=`plutil -convert json -o - $TEST/Info.plist|jq -r ".CFBundleIdentifier"`
xcrun simctl launch --console booted $TEST_ID
xcrun simctl uninstall booted $TEST_ID
done

16
deps/GSL/.gitignore vendored Normal file

@@ -0,0 +1,16 @@
CMakeFiles
build
tests/CMakeFiles
tests/Debug
*.opensdf
*.sdf
tests/*tests.dir
*.vcxproj
*.vcxproj.filters
*.sln
*.tlog
Testing/Temporary/*.*
CMakeCache.txt
*.suo
.vs/
.vscode/

337
deps/GSL/.travis.yml vendored Normal file

@@ -0,0 +1,337 @@
language: cpp
notifications:
email: false
# Use Linux unless specified otherwise
os: linux
dist: bionic
cache:
directories:
- ${TRAVIS_BUILD_DIR}/deps
stages:
- name: Latest # Compiler with the latest major version
- name: Previous # Compilers with the major version Latest - 1
- name: Validation # run other jobs
jobs:
include:
##########################################################################
# Validate CMake configuration
##########################################################################
- name: CMake 3.1.3 - latest
stage: Validation
env: &CMAKE_VERSION_LIST
- CMAKE_VERSION: '"3.17.0 3.16.5 3.15.7 3.14.7 3.13.5 3.12.4 3.11.4 3.10.3 3.9.6 3.8.2 3.7.2 3.6.3 3.5.2 3.4.3 3.3.2 3.2.3 3.1.3"'
- GSL_CXX_STANDARD: 14
addons: # Get latest release (candidate)
apt:
sources:
- sourceline: 'deb https://apt.kitware.com/ubuntu/ bionic main'
key_url: 'https://apt.kitware.com/keys/kitware-archive-latest.asc'
- sourceline: 'deb https://apt.kitware.com/ubuntu/ bionic-rc main'
packages:
- cmake
script:
- |
cd ./build
( set -eu
for CMAKE in ${CMAKE_path[@]}; do test_CMake_generate $CMAKE; done
export CXX=clang++
for CMAKE in ${CMAKE_path[@]}; do test_CMake_generate $CMAKE; done
)
- name: CMake 3.2.3 - 3.17.0
stage: Validation
os: osx
osx_image: xcode11.3
env:
- CMAKE_VERSION: '"3.17.0 3.16.5 3.15.7 3.14.7 3.13.5 3.12.4 3.11.4 3.10.3 3.9.6 3.8.2 3.7.2 3.6.3 3.5.2 3.4.3 3.3.2 3.2.3"'
script:
- |
cd ./build
( set -eu
for CMAKE in ${CMAKE_path[@]}; do test_CMake_generate $CMAKE; done
)
##########################################################################
# AppleClang on OSX
##########################################################################
# Xcode 10.3
- name: AppleClang Xcode-10.3 C++14 Debug
stage: Previous
env: BUILD_TYPE=Debug GSL_CXX_STANDARD=14
os: osx
osx_image: xcode10.3 # AppleClang 10.0.1 same compiler as Xcode 10.2
- name: AppleClang Xcode-10.3 C++14 Release
env: BUILD_TYPE=Release GSL_CXX_STANDARD=14
os: osx
osx_image: xcode10.3
- name: AppleClang Xcode-10.3 C++17 Debug
env: BUILD_TYPE=Debug GSL_CXX_STANDARD=17
os: osx
osx_image: xcode10.3
- name: AppleClang Xcode-10.3 C++17 Release
env: BUILD_TYPE=Release GSL_CXX_STANDARD=17
os: osx
osx_image: xcode10.3
# Xcode 11.4
- name: AppleClang Xcode-11.4 C++17 Debug
stage: Latest
env: BUILD_TYPE=Debug GSL_CXX_STANDARD=17
os: osx
osx_image: xcode11.4
- name: AppleClang Xcode-11.4 C++17 Release
env: BUILD_TYPE=Release GSL_CXX_STANDARD=17
os: osx
osx_image: xcode11.4
- name: AppleClang Xcode-11.4 C++14 Debug
env: BUILD_TYPE=Debug GSL_CXX_STANDARD=14
os: osx
osx_image: xcode11.4
- name: AppleClang Xcode-11.4 C++14 Release
env: BUILD_TYPE=Release GSL_CXX_STANDARD=14
os: osx
osx_image: xcode11.4
##########################################################################
# Clang on Linux
##########################################################################
# Clang 9
- name: Clang-9 C++14 Debug
stage: Previous
env: CXX=clang++-9 BUILD_TYPE=Debug GSL_CXX_STANDARD=14
addons: &clang9
apt:
sources:
- sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main'
key_url: https://apt.llvm.org/llvm-snapshot.gpg.key
packages:
- clang-9
- name: Clang-9 C++14 Release
env: CXX=clang++-9 BUILD_TYPE=Release GSL_CXX_STANDARD=14
addons: *clang9
- name: Clang-9 C++17 Debug
env: CXX=clang++-9 BUILD_TYPE=Debug GSL_CXX_STANDARD=17
addons: *clang9
- name: Clang-9 C++17 Release
env: CXX=clang++-9 BUILD_TYPE=Release GSL_CXX_STANDARD=17
addons: *clang9
# Clang 10
- name: Clang-10 C++14 Debug
stage: Latest
env: CXX=clang++-10 BUILD_TYPE=Debug GSL_CXX_STANDARD=14
addons: &clang10
apt:
sources:
- sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-10 main'
key_url: https://apt.llvm.org/llvm-snapshot.gpg.key
packages:
- clang-10
- name: Clang-10 C++14 Release
env: CXX=clang++-10 BUILD_TYPE=Release GSL_CXX_STANDARD=14
addons: *clang10
- name: Clang-10 C++17 Debug
env: CXX=clang++-10 BUILD_TYPE=Debug GSL_CXX_STANDARD=17
addons: *clang10
- name: Clang-10 C++17 Release
env: CXX=clang++-10 BUILD_TYPE=Release GSL_CXX_STANDARD=17
addons: *clang10
##########################################################################
# GCC on Linux
##########################################################################
# GCC 8
- name: GCC-8 C++14 Debug
stage: Previous
env: CXX=g++-8 BUILD_TYPE=Debug GSL_CXX_STANDARD=14
addons: &gcc8
apt:
packages: g++-8
- name: GCC-8 C++14 Release
env: CXX=g++-8 BUILD_TYPE=Release GSL_CXX_STANDARD=14
addons: *gcc8
- name: GCC-8 C++17 Debug
env: CXX=g++-8 BUILD_TYPE=Debug GSL_CXX_STANDARD=17
addons: *gcc8
- name: GCC-8 C++17 Release
env: CXX=g++-8 BUILD_TYPE=Release GSL_CXX_STANDARD=17
addons: *gcc8
# GCC 9
- name: GCC-9 C++14 Debug
stage: Latest
env: CXX=g++-9 BUILD_TYPE=Debug GSL_CXX_STANDARD=14
addons: &gcc9
apt:
sources:
- sourceline: ppa:ubuntu-toolchain-r/test
packages:
- g++-9
- name: GCC-9 C++14 Release
env: CXX=g++-9 BUILD_TYPE=Release GSL_CXX_STANDARD=14
addons: *gcc9
- name: GCC-9 C++17 Debug
env: CXX=g++-9 BUILD_TYPE=Debug GSL_CXX_STANDARD=17
addons: *gcc9
- name: GCC-9 C++17 Release
env: CXX=g++-9 BUILD_TYPE=Release GSL_CXX_STANDARD=17
addons: *gcc9
before_install:
- |
# Configuration
JOBS=2 # Travis machines have 2 cores
# Dependencies required by the CI (cached directory)
DEPS_DIR="${TRAVIS_BUILD_DIR}/deps"
- |
# Setup
mkdir -p "${DEPS_DIR:?}" && cd "${DEPS_DIR:?}"
mkdir -p ~/tools && cd ~/tools
if [[ ${TRAVIS_OS_NAME:?} == "osx" ]]; then
export PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
fi
- |
# Helper functions
# usage: if [[ $(check_url '<url>') ]]; then ...
function check_url {( set +e
if [[ "$1" =~ 'github.com' ]]; then # check for first byte
if curl --fail --silent --output /dev/null --connect-timeout 12 --range 0-0 "$1"
then echo true; fi
else # request head
if curl --fail --silent --output /dev/null --connect-timeout 12 --head "$1"
then echo true; fi
fi
return
)}
install:
############################################################################
# Install a different CMake version (or several)
############################################################################
- |
# Install CMake versions
( set -euo pipefail
if [[ ${CMAKE_VERSION:-} ]]; then
if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then
OS="Linux"; EXT="sh"
if [[ ! ("${CMAKE_VERSION:-}" =~ .+[' '].+) ]]; then
# Single entry -> default CMake version
CMAKE_DEFAULT_DIR="/usr/local"
fi
elif [[ "${TRAVIS_OS_NAME}" == "osx" ]]; then OS="Darwin"; EXT="tar.gz"
else echo "CMake install not supported for this OS."; exit 1
fi
CMAKE_INSTALLER="install-cmake.${EXT}"
fi
for VERSION in ${CMAKE_VERSION:-}; do
CMAKE_URL="https://github.com/Kitware/CMake/releases/download/v${VERSION}/cmake-${VERSION}-${OS}-x86_64.${EXT}"
if [[ $(check_url "$CMAKE_URL") ]]; then
curl -sSL ${CMAKE_URL} -o ${CMAKE_INSTALLER}
CMAKE_DIR="${CMAKE_DEFAULT_DIR:-"${HOME}/tools/cmake-${VERSION}"}"
mkdir -p ${CMAKE_DIR}
if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then
chmod +x ${CMAKE_INSTALLER}
sudo ./${CMAKE_INSTALLER} --prefix=${CMAKE_DIR} --skip-license
else # OSX
mkdir -p ./CMake_tmp
tar --extract --gzip --file=${CMAKE_INSTALLER} --directory=./CMake_tmp
mv ./CMake_tmp/*/CMake.app/Contents/* ${CMAKE_DIR}
fi
rm --recursive --force ./CMake_tmp ${CMAKE_INSTALLER}
else echo 'Invalid url!'; echo "Version: ${VERSION}"
fi
done
)
if [[ ${CMAKE_VERSION:-} && "${TRAVIS_OS_NAME:?}" == "osx" && ! ("${CMAKE_VERSION:-}" =~ .+[' '].+) ]]
then # Single entry -> default CMake version
export PATH=${HOME}/tools/cmake-${CMAKE_VERSION:?}/bin:$PATH
fi
CMAKE_path=("cmake") # start with installed CMake version
for VERSION in ${CMAKE_VERSION:-}; do
tmp_path="$HOME/tools/cmake-${VERSION:?}/bin/cmake"
if [[ -x "$(command -v ${tmp_path:?})" ]]; then CMAKE_path+=("${tmp_path:?}"); fi
done
function test_CMake_generate {
# $1: cmake or full path to cmake
shopt -s extglob
if [[ "$1" == "cmake" || -x "$(command -v $1)" && "$1" =~ .*cmake$ ]]; then
echo "----------------"
$1 --version
echo "Configuration = ${BUILD_TYPE:-Debug}"
$1 -DCMAKE_BUILD_TYPE=${BUILD_TYPE:-Debug} ${CMAKE_GEN_FLAGS[@]:?} ..
rm -rf !(tests/googletest-*)
if [[ ! ${BUILD_TYPE:-} ]]; then echo "" && echo "Configuration = Release"
$1 -DCMAKE_BUILD_TYPE=Release ${CMAKE_GEN_FLAGS[@]:?} ..
rm -rf !(tests/googletest-*)
fi
else echo "Non existing command: $1"
fi
}
- |
# CMake wrapper (Trusty, Xenial & Bionic); restore default behaviour.
if [[ "${TRAVIS_OS_NAME:?}" == "linux" &&
"$(lsb_release --codename)" =~ (trusty|xenial|bionic)$ ]]
then
if [[ -x $(command -v /usr/local/bin/cmake) ]]; then
function cmake { command /usr/local/bin/cmake $@; }
elif [[ -x $(command -v /usr/bin/cmake) ]]; then
function cmake { command /usr/bin/cmake $@; }
fi
fi
############################################################################
# [linux]: Install the right version of libc++
# Based on https://github.com/ldionne/hana/blob/master/.travis.yml
############################################################################
- |
LLVM_INSTALL=${DEPS_DIR:?}/llvm/install
# if in linux and compiler clang and llvm not installed
if [[ "${TRAVIS_OS_NAME:?}" == "linux" && "${CXX%%+*}" == "clang" && -n "$(ls -A ${LLVM_INSTALL:?})" ]]; then
if [[ "${CXX}" == "clang++-3.6" ]]; then LLVM_VERSION="3.6.2";
elif [[ "${CXX}" == "clang++-3.7" ]]; then LLVM_VERSION="3.7.1";
elif [[ "${CXX}" == "clang++-3.8" ]]; then LLVM_VERSION="3.8.1";
elif [[ "${CXX}" == "clang++-3.9" ]]; then LLVM_VERSION="3.9.1";
fi
LLVM_URL="http://llvm.org/releases/${LLVM_VERSION}/llvm-${LLVM_VERSION}.src.tar.xz"
LIBCXX_URL="http://llvm.org/releases/${LLVM_VERSION}/libcxx-${LLVM_VERSION}.src.tar.xz"
LIBCXXABI_URL="http://llvm.org/releases/${LLVM_VERSION}/libcxxabi-${LLVM_VERSION}.src.tar.xz"
mkdir -p llvm llvm/build llvm/projects/libcxx llvm/projects/libcxxabi
travis_retry wget -O - ${LLVM_URL} | tar --strip-components=1 -xJ -C llvm
travis_retry wget -O - ${LIBCXX_URL} | tar --strip-components=1 -xJ -C llvm/projects/libcxx
travis_retry wget -O - ${LIBCXXABI_URL} | tar --strip-components=1 -xJ -C llvm/projects/libcxxabi
(cd llvm/build && cmake .. -DCMAKE_INSTALL_PREFIX=${LLVM_INSTALL})
(cd llvm/build/projects/libcxx && make install -j2)
(cd llvm/build/projects/libcxxabi && make install -j2)
export CXXFLAGS="-isystem ${LLVM_INSTALL}/include/c++/v1"
export LDFLAGS="-L ${LLVM_INSTALL}/lib -l c++ -l c++abi"
export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${LLVM_INSTALL}/lib"
fi
before_script:
- |
cd "${TRAVIS_BUILD_DIR:?}"
mkdir build && cd build
if [[ ${GSL_CXX_STANDARD:-} ]]; then
CMAKE_GEN_FLAGS=("-DGSL_CXX_STANDARD=$GSL_CXX_STANDARD")
fi
CMAKE_GEN_FLAGS+=("-Wdev -Werror=dev --warn-uninitialized")
script:
# generate build files
- cmake .. -DCMAKE_BUILD_TYPE=${BUILD_TYPE:?} ${CMAKE_GEN_FLAGS[@]:?}
# build and run tests
- cmake --build . -- -j${JOBS}
- ctest --output-on-failure -j${JOBS}

91
deps/GSL/CMakeLists.txt vendored Normal file

@@ -0,0 +1,91 @@
cmake_minimum_required(VERSION 3.1.3...3.16)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
include(guidelineSupportLibrary)
project(GSL VERSION 3.1.0 LANGUAGES CXX)
# Use GNUInstallDirs to provide the right locations on all platforms
include(GNUInstallDirs)
# Creates a library GSL which is an interface (header files only)
add_library(GSL INTERFACE)
# NOTE: If you want to use GSL prefer to link against GSL using this alias target
# EX:
# target_link_libraries(foobar PRIVATE Microsoft.GSL::GSL)
#
# Add Microsoft.GSL::GSL alias for GSL so that dependents can be agnostic about
# whether GSL was added via `add_subdirectory` or `find_package`
add_library(Microsoft.GSL::GSL ALIAS GSL)
# Determine whether this is a standalone project or included by other projects
set(GSL_STANDALONE_PROJECT OFF)
if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
set(GSL_STANDALONE_PROJECT ON)
endif()
# This GSL implementation generally assumes a platform that implements C++14 support.
set(gsl_min_cxx_standard "14")
if (GSL_STANDALONE_PROJECT)
gsl_set_default_cxx_standard(${gsl_min_cxx_standard})
else()
gsl_client_set_cxx_standard(${gsl_min_cxx_standard})
endif()
# add include folders to the library and targets that consume it
# the SYSTEM keyword suppresses warnings for users of the library
if(GSL_STANDALONE_PROJECT)
target_include_directories(GSL INTERFACE
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
)
else()
target_include_directories(GSL SYSTEM INTERFACE
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
)
endif()
# Add natvis file
gsl_add_native_visualizer_support()
install(TARGETS GSL EXPORT Microsoft.GSLConfig)
install(
DIRECTORY include/gsl
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
)
# Make library importable by other projects
install(EXPORT Microsoft.GSLConfig NAMESPACE Microsoft.GSL:: DESTINATION ${CMAKE_INSTALL_DATADIR}/cmake/Microsoft.GSL)
export(TARGETS GSL NAMESPACE Microsoft.GSL:: FILE Microsoft.GSLConfig.cmake)
# Add find_package() versioning support. The version for
# generated Microsoft.GSLConfigVersion.cmake will be used from
# last project() command. The version's compatibility is set between all
# minor versions (as it was in prev. GSL releases).
include(CMakePackageConfigHelpers)
if(${CMAKE_VERSION} VERSION_LESS "3.14.0")
write_basic_package_version_file(
${CMAKE_CURRENT_BINARY_DIR}/Microsoft.GSLConfigVersion.cmake
COMPATIBILITY SameMajorVersion
)
else()
write_basic_package_version_file(
${CMAKE_CURRENT_BINARY_DIR}/Microsoft.GSLConfigVersion.cmake
COMPATIBILITY SameMajorVersion
ARCH_INDEPENDENT
)
endif()
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/Microsoft.GSLConfigVersion.cmake DESTINATION ${CMAKE_INSTALL_DATADIR}/cmake/Microsoft.GSL)
option(GSL_TEST "Generate tests." ${GSL_STANDALONE_PROJECT})
if (GSL_TEST)
enable_testing()
if(IOS)
add_compile_definitions(
GTEST_HAS_DEATH_TEST=1
)
endif()
add_subdirectory(tests)
endif()

18
deps/GSL/CMakeSettings.json vendored Normal file

@@ -0,0 +1,18 @@
{
"configurations": [
{
"name": "x64-Debug",
"generator": "Ninja",
"configurationType": "Debug",
"inheritEnvironments": [
"msvc_x64_x64"
],
"buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}",
"installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}",
"cmakeCommandArgs": "-DGSL_CXX_STANDARD=17",
"buildCommandArgs": "-v",
"ctestCommandArgs": "",
"codeAnalysisRuleset": "CppCoreCheckRules.ruleset"
}
]
}

29
deps/GSL/CONTRIBUTING.md vendored Normal file

@@ -0,0 +1,29 @@
## Contributing to the Guidelines Support Library
The Guidelines Support Library (GSL) contains functions and types that are suggested for use by the
[C++ Core Guidelines](https://github.com/isocpp/CppCoreGuidelines). GSL design changes are made only as a result of modifications to the Guidelines.
GSL is accepting contributions that improve or refine any of the types in this library as well as ports to other platforms. Changes should have an issue
tracking the suggestion that has been approved by the maintainers. Your pull request should include a link to the bug that you are fixing. If you've submitted
a PR, please post a comment in the associated issue to avoid duplication of effort.
## Legal
You will need to complete a Contributor License Agreement (CLA). Briefly, this agreement testifies that you are granting us and the community permission to
use the submitted change according to the terms of the project's license, and that the work being submitted is under appropriate copyright.
Please submit a Contributor License Agreement (CLA) before submitting a pull request. You may visit https://cla.microsoft.com to sign digitally.
## Housekeeping
Your pull request should:
* Include a description of what your change intends to do
* Be a child commit of a reasonably recent commit in the **master** branch
* Requests need not be a single commit, but should be a linear sequence of commits (i.e. no merge commits in your PR)
* It is desirable, but not necessary, for the tests to pass at each commit. Please see [README.md](./README.md) for instructions to build the test suite.
* Have clear commit messages
* e.g. "Fix issue", "Add tests for type", etc.
* Include appropriate tests
* Tests should include reasonable permutations of the target fix/change
* Include baseline changes with your change
* All changed code must have 100% code coverage
* To avoid line ending issues, set `autocrlf = input` and `whitespace = cr-at-eol` in your git configuration

59
deps/GSL/GSL.natvis vendored Normal file

@@ -0,0 +1,59 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
This will make GitHub and some editors recognize this code as XML:
vim: syntax=xml
-->
<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
<!-- These types are from the gsl_assert header. -->
<Type Name="gsl::fail_fast">
<!-- na hides the address, otherwise it would appear as 0x.... "Message" -->
<DisplayString>{_Data._What,nasb}</DisplayString>
</Type>
<!-- These types are from the gsl_util header. -->
<Type Name="gsl::final_action&lt;*&gt;">
<DisplayString>{{ invoke = {invoke_}, action = {f_} }}</DisplayString>
<Expand>
<Item Name="[invoke]">invoke_</Item>
<Item Name="[callback]">f_</Item>
</Expand>
</Type>
<Type Name="gsl::span&lt;*, *&gt;">
<DisplayString>{{ extent = {storage_.size_} }}</DisplayString>
<Expand>
<ArrayItems>
<Size>storage_.size_</Size>
<ValuePointer>storage_.data_</ValuePointer>
</ArrayItems>
</Expand>
</Type>
<Type Name="gsl::basic_string_span&lt;*, *&gt;">
<DisplayString>{span_.storage_.data_,[span_.storage_.size_]na}</DisplayString>
<Expand>
<Item Name="[size]">span_.storage_.size_</Item>
<ArrayItems>
<Size>span_.storage_.size_</Size>
<ValuePointer>span_.storage_.data_</ValuePointer>
</ArrayItems>
</Expand>
</Type>
<Type Name="gsl::basic_zstring_span&lt;*, *&gt;">
<DisplayString>{span_.storage_.data_,[span_.storage_.size_]na}</DisplayString>
<Expand>
<Item Name="[size]">span_.storage_.size_</Item>
<ArrayItems>
<Size>span_.storage_.size_</Size>
<ValuePointer>span_.storage_.data_</ValuePointer>
</ArrayItems>
</Expand>
</Type>
<!-- These types are from the gsl header. -->
<Type Name="gsl::not_null&lt;*&gt;">
<!-- We can always dereference this since it's an invariant. -->
<DisplayString>value = {*ptr_}</DisplayString>
</Type>
</AutoVisualizer>
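
A hypothetical snippet (not part of this commit) that creates instances of the types covered by these visualizers; with GSL.natvis added to a Visual Studio project, the debugger renders them using the DisplayString formats above:

// natvis_demo.cpp -- hypothetical example exercising the visualizers above
#include <gsl/gsl>
#include <array>

int main()
{
    std::array<int, 4> data{{10, 20, 30, 40}};

    gsl::span<int> view{data};              // displayed as "{ extent = 4 }" with expandable elements
    gsl::not_null<int*> first{view.data()}; // displayed as "value = 10" (always dereferenceable)
    auto guard = gsl::finally([] {});       // gsl::final_action: shows the [invoke] flag and [callback]

    return (*first == 10 && view.size() == 4u) ? 0 : 1;
}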

21
deps/GSL/LICENSE vendored Normal file

@@ -0,0 +1,21 @@
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
This code is licensed under the MIT License (MIT).
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

182
deps/GSL/README.md vendored Normal file

@@ -0,0 +1,182 @@
# GSL: Guidelines Support Library
[![Build Status](https://dev.azure.com/cppstat/GSL/_apis/build/status/microsoft.GSL?branchName=master)](https://dev.azure.com/cppstat/GSL/_build/latest?definitionId=1&branchName=master)
The Guidelines Support Library (GSL) contains functions and types that are suggested for use by the
[C++ Core Guidelines](https://github.com/isocpp/CppCoreGuidelines) maintained by the [Standard C++ Foundation](https://isocpp.org).
This repo contains Microsoft's implementation of GSL.
The entire implementation is provided inline in the headers under the [gsl](./include/gsl) directory. The implementation generally assumes a platform that implements C++14 support.
While some types have been broken out into their own headers (e.g. [gsl/span](./include/gsl/span)),
it is simplest to just include [gsl/gsl](./include/gsl/gsl) and gain access to the entire library.
> NOTE: We encourage contributions that improve or refine any of the types in this library as well as ports to
other platforms. Please see [CONTRIBUTING.md](./CONTRIBUTING.md) for more information about contributing.
# Project Code of Conduct
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
# Usage of Third Party Libraries
This project makes use of the [Google Test](https://github.com/google/googletest) testing library. Please see the [ThirdPartyNotices.txt](./ThirdPartyNotices.txt) file for details regarding the licensing of Google Test.
# Supported features
## Microsoft GSL implements the following from the C++ Core Guidelines:
Feature | Supported? | Description
-----------------------------------|:----------:|-------------
[**1. Views**][cg-views] | |
owner | &#x2611; | an alias for a raw pointer
not_null | &#x2611; | restricts a pointer / smart pointer to hold non-null values
span | &#x2611; | a view over a contiguous sequence of memory. Based on the standardized version of `std::span`; however, `gsl::span` enforces bounds checking. See the [wiki](https://github.com/microsoft/GSL/wiki/gsl::span-and-std::span) for additional information.
span_p | &#x2610; | spans a range starting from a pointer to the first place for which the predicate is true
basic_zstring | &#x2611; | A pointer to a C-string (zero-terminated array) with a templated char type
zstring | &#x2611; | An alias to `basic_zstring` with a char type of char
czstring | &#x2611; | An alias to `basic_zstring` with a char type of const char
wzstring | &#x2611; | An alias to `basic_zstring` with a char type of wchar_t
cwzstring | &#x2611; | An alias to `basic_zstring` with a char type of const wchar_t
u16zstring | &#x2611; | An alias to `basic_zstring` with a char type of char16_t
cu16zstring | &#x2611; | An alias to `basic_zstring` with a char type of const char16_t
u32zstring | &#x2611; | An alias to `basic_zstring` with a char type of char32_t
cu32zstring | &#x2611; | An alias to `basic_zstring` with a char type of const char32_t
[**2. Owners**][cg-owners] | |
unique_ptr | &#x2611; | an alias to `std::unique_ptr`
shared_ptr | &#x2611; | an alias to `std::shared_ptr`
stack_array | &#x2610; | a stack-allocated array
dyn_array | &#x2610; | a heap-allocated array
[**3. Assertions**][cg-assertions] | |
Expects | &#x2611; | a precondition assertion; on failure it terminates
Ensures | &#x2611; | a postcondition assertion; on failure it terminates
[**4. Utilities**][cg-utilities] | |
move_owner | &#x2610; | a helper function that moves one `owner` to the other
byte | &#x2611; | either an alias to std::byte or a byte type
final_action | &#x2611; | a RAII style class that invokes a functor on its destruction
finally | &#x2611; | a helper function instantiating `final_action`
GSL_SUPPRESS | &#x2611; | a macro that takes an argument and turns it into `[[gsl::suppress(x)]]` or `[[gsl::suppress("x")]]`
[[implicit]] | &#x2610; | a "marker" to put on single-argument constructors to explicitly make them non-explicit
index | &#x2611; | a type to use for all container and array indexing (currently an alias for std::ptrdiff_t)
joining_thread | &#x2610; | a RAII style version of `std::thread` that joins
narrow | &#x2611; | a checked version of narrow_cast; it can throw `narrowing_error`
narrow_cast | &#x2611; | a narrowing cast for values and a synonym for static_cast
narrowing_error | &#x2611; | a custom exception type thrown by `narrow()`
[**5. Concepts**][cg-concepts] | &#x2610; |
## The following features do not exist in or have been removed from the C++ Core Guidelines:
Feature | Supported? | Description
-----------------------------------|:----------:|-------------
strict_not_null | &#x2611; | A stricter version of `not_null` with explicit constructors
multi_span | &#x2610; | Deprecated. Multi-dimensional span.
strided_span | &#x2610; | Deprecated. Support for this type has been discontinued.
basic_string_span | &#x2610; | Deprecated. Like `span` but for strings with a templated char type
string_span | &#x2610; | Deprecated. An alias to `basic_string_span` with a char type of char
cstring_span | &#x2610; | Deprecated. An alias to `basic_string_span` with a char type of const char
wstring_span | &#x2610; | Deprecated. An alias to `basic_string_span` with a char type of wchar_t
cwstring_span | &#x2610; | Deprecated. An alias to `basic_string_span` with a char type of const wchar_t
u16string_span | &#x2610; | Deprecated. An alias to `basic_string_span` with a char type of char16_t
cu16string_span | &#x2610; | Deprecated. An alias to `basic_string_span` with a char type of const char16_t
u32string_span | &#x2610; | Deprecated. An alias to `basic_string_span` with a char type of char32_t
cu32string_span | &#x2610; | Deprecated. An alias to `basic_string_span` with a char type of const char32_t
This is based on [CppCoreGuidelines semi-specification](https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#gsl-guidelines-support-library).
[cg-views]: https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#gslview-views
[cg-owners]: https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#gslowner-ownership-pointers
[cg-assertions]: https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#gslassert-assertions
[cg-utilities]: https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#gslutil-utilities
[cg-concepts]: https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#gslconcept-concepts
# Quick Start
## Supported Compilers
The GSL officially supports the current and previous major release of MSVC, GCC, Clang, and XCode's Apple-Clang.
See our latest test results for the most up-to-date list of supported configurations.
Compiler |Toolset Versions Currently Tested
:------- |--:
XCode |11.4 & 10.3
GCC |9 & 8
Clang |11 & 10
Visual Studio with MSVC | VS2017 (15.9) & VS2019 (16.4)
Visual Studio with LLVM | VS2017 (Clang 9) & VS2019 (Clang 10)
---
If you successfully port GSL to another platform, we would love to hear from you!
- Submit an issue specifying the platform and target.
- Consider contributing your changes by filing a pull request with any necessary changes.
- If at all possible, add a CI/CD step and add the button to the table below!
Target | CI/CD Status
:------- | -----------:
iOS | ![CI_iOS](https://github.com/microsoft/GSL/workflows/CI_iOS/badge.svg)
Android | ![CI_Android](https://github.com/microsoft/GSL/workflows/CI_Android/badge.svg)
Note: These CI/CD steps are run with each pull request; however, failures in them are non-blocking.
## Building the tests
To build the tests, you will require the following:
* [CMake](http://cmake.org), version 3.1.3 (3.2.3 for AppleClang) or later to be installed and in your PATH.
These steps assume the source code of this repository has been cloned into a directory named `c:\GSL`.
1. Create a directory to contain the build outputs for a particular architecture (we name it c:\GSL\build-x86 in this example).
cd GSL
md build-x86
cd build-x86
2. Configure CMake to use the compiler of your choice (you can see a list by running `cmake --help`).
cmake -G "Visual Studio 15 2017" c:\GSL
3. Build the test suite (in this case, in the Debug configuration, Release is another good choice).
cmake --build . --config Debug
4. Run the test suite.
ctest -C Debug
All tests should pass - indicating your platform is fully supported and you are ready to use the GSL types!
## Building GSL - Using vcpkg
You can download and install GSL using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager:
git clone https://github.com/Microsoft/vcpkg.git
cd vcpkg
./bootstrap-vcpkg.sh
./vcpkg integrate install
vcpkg install ms-gsl
The GSL port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository.
## Using the libraries
As the types are entirely implemented inline in headers, there are no linking requirements.
You can copy the [gsl](./include/gsl) directory into your source tree so it is available
to your compiler, then include the appropriate headers in your program.
Alternatively, set your compiler's *include path* flag to point to the GSL development folder (`c:\GSL\include` in the example above) or the installation folder (after running the install), e.g.
MSVC++
/I c:\GSL\include
GCC/clang
-I$HOME/dev/GSL/include
Include the library using:
#include <gsl/gsl>
## Usage in CMake
The library provides a Config file for CMake; once installed, it can be found via
find_package(Microsoft.GSL CONFIG)
This, when successful, will add a library target called `Microsoft.GSL::GSL` that you can use via the usual
`target_link_libraries` mechanism.
## Debugging visualization support
For Visual Studio users, the file [GSL.natvis](./GSL.natvis) in the root directory of the repository can be added to your project if you would like more helpful visualization of GSL types in the Visual Studio debugger than would be offered by default.
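
A minimal consumer program matching the setup described above (hypothetical file name, not part of this commit; the -I path is the GCC/Clang example from the README):

// hello_gsl.cpp -- hypothetical consumer of the header-only library
// Build example: g++ -std=c++14 -I$HOME/dev/GSL/include hello_gsl.cpp
#include <gsl/gsl>
#include <array>
#include <iostream>

int sum(gsl::span<const int> values)   // bounds-checked view over contiguous memory
{
    Expects(!values.empty());           // contract check from <gsl/assert>
    int total = 0;
    for (const int v : values) total += v;
    return total;
}

int main()
{
    const std::array<int, 3> data{{1, 2, 3}};
    std::cout << sum(data) << '\n';     // prints 6; the array converts implicitly to gsl::span
    return 0;
}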

41
deps/GSL/ThirdPartyNotices.txt vendored Normal file

@@ -0,0 +1,41 @@
THIRD-PARTY SOFTWARE NOTICES AND INFORMATION
Do Not Translate or Localize
GSL: Guidelines Support Library incorporates third party material from the projects listed below.
-------------------------------------------------------------------------------
Software: Google Test
Owner: Google Inc.
Source URL: github.com/google/googletest
License: BSD 3-Clause
Text:
Copyright 2008, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------------------

68
deps/GSL/azure-pipelines.yml vendored Normal file

@@ -0,0 +1,68 @@
trigger:
- master
pr:
autoCancel: true
# GCC
stages:
- stage: GCC
dependsOn: []
variables:
- name: CC
value: gcc
- name: CXX
value: g++
jobs:
- template: ./pipelines/jobs.yml
parameters:
jobName: 'Validate GCC latest'
imageName: ubuntu-20.04
- template: ./pipelines/jobs.yml
parameters:
jobName: 'Validate GCC Previous'
imageName: ubuntu-18.04
# Clang
- stage: Clang
dependsOn: []
variables:
- name: CC
value: clang
- name: CXX
value: clang++
jobs:
- template: ./pipelines/jobs.yml
parameters:
jobName: 'Validate Clang latest'
imageName: ubuntu-20.04
- template: ./pipelines/jobs.yml
parameters:
jobName: 'Validate Clang Previous'
imageName: ubuntu-18.04
# MSVC
- stage: MSVC
dependsOn: []
jobs:
- template: ./pipelines/jobs.yml
parameters:
jobName: 'Validate MSVC latest'
imageName: windows-latest
- template: ./pipelines/jobs.yml
parameters:
jobName: 'Validate MSVC Previous'
imageName: vs2017-win2016
# Apple-Clang
- stage: Apple_Clang
dependsOn: []
jobs:
- template: ./pipelines/jobs.yml
parameters:
jobName: 'Validate Apple-Clang latest'
imageName: macos-10.15
- template: ./pipelines/jobs.yml
parameters:
jobName: 'Validate Apple-Clang Previous'
imageName: macos-10.14

77
deps/GSL/cmake/guidelineSupportLibrary.cmake vendored Normal file

@@ -0,0 +1,77 @@
# This cmake module is meant to hold helper functions/macros
# that make maintaining the cmake build system much easier.
# This is especially helpful since gsl needs to provide coverage
# for multiple versions of cmake.
#
# Any functions/macros should have a gsl_* prefix to avoid problems
if (DEFINED guideline_support_library_include_guard)
return()
endif()
set(guideline_support_library_include_guard ON)
function(gsl_set_default_cxx_standard min_cxx_standard)
set(GSL_CXX_STANDARD "${min_cxx_standard}" CACHE STRING "Use c++ standard")
set(GSL_CXX_STD "cxx_std_${GSL_CXX_STANDARD}")
if (MSVC)
set(GSL_CXX_STD_OPT "-std:c++${GSL_CXX_STANDARD}")
else()
set(GSL_CXX_STD_OPT "-std=c++${GSL_CXX_STANDARD}")
endif()
# when minimum version required is 3.8.0 remove if below
# both branches do exactly the same thing
if (CMAKE_VERSION VERSION_LESS 3.7.9)
include(CheckCXXCompilerFlag)
CHECK_CXX_COMPILER_FLAG("${GSL_CXX_STD_OPT}" COMPILER_SUPPORTS_CXX_STANDARD)
if(COMPILER_SUPPORTS_CXX_STANDARD)
target_compile_options(GSL INTERFACE "${GSL_CXX_STD_OPT}")
else()
message(FATAL_ERROR "The compiler ${CMAKE_CXX_COMPILER} has no c++${GSL_CXX_STANDARD} support. Please use a different C++ compiler.")
endif()
else()
target_compile_features(GSL INTERFACE "${GSL_CXX_STD}")
# on *nix systems force the use of -std=c++XX instead of -std=gnu++XX (default)
set(CMAKE_CXX_EXTENSIONS OFF)
endif()
endfunction()
# The best way for a project to specify the GSL's C++ standard is by the client specifying
# the CMAKE_CXX_STANDARD. However, this isn't always ideal, since CMAKE_CXX_STANDARD is
# tied to the cmake version and many projects have low cmake minimums.
#
# So provide an alternative approach in case that doesn't work.
function(gsl_client_set_cxx_standard min_cxx_standard)
if (DEFINED CMAKE_CXX_STANDARD)
if (${CMAKE_CXX_STANDARD} VERSION_LESS ${min_cxx_standard})
message(FATAL_ERROR "GSL: Requires at least CXX standard ${min_cxx_standard}, user provided ${CMAKE_CXX_STANDARD}")
endif()
# Set the GSL standard to what the client desires
set(GSL_CXX_STANDARD "${CMAKE_CXX_STANDARD}" PARENT_SCOPE)
# Exit out early to avoid extra unnecessary work
return()
endif()
# Otherwise pick a reasonable default
gsl_set_default_cxx_standard(${min_cxx_standard})
endfunction()
# Adding the GSL.natvis files improves the debugging experience for users of this library.
function(gsl_add_native_visualizer_support)
if (CMAKE_VERSION VERSION_GREATER 3.7.8)
if (MSVC_IDE)
option(GSL_VS_ADD_NATIVE_VISUALIZERS "Configure project to use Visual Studio native visualizers" TRUE)
else()
set(GSL_VS_ADD_NATIVE_VISUALIZERS FALSE CACHE INTERNAL "Native visualizers are Visual Studio extension" FORCE)
endif()
# add natvis file to the library so it will automatically be loaded into Visual Studio
if(GSL_VS_ADD_NATIVE_VISUALIZERS)
target_sources(GSL INTERFACE $<BUILD_INTERFACE:${GSL_SOURCE_DIR}/GSL.natvis>)
endif()
endif()
endfunction()

63
deps/GSL/include/gsl/algorithm vendored Normal file

@@ -0,0 +1,63 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef GSL_ALGORITHM_H
#define GSL_ALGORITHM_H
#include <gsl/assert> // for Expects
#include <gsl/span> // for dynamic_extent, span
#include <algorithm> // for copy_n
#include <cstddef> // for ptrdiff_t
#include <type_traits> // for is_assignable
#ifdef _MSC_VER
#pragma warning(push)
// turn off some warnings that are noisy about our Expects statements
#pragma warning(disable : 4127) // conditional expression is constant
#pragma warning(disable : 4996) // unsafe use of std::copy_n
#endif // _MSC_VER
namespace gsl
{
// Note: this will generate faster code than std::copy using span iterator in older msvc+stl
// not necessary for msvc since VS2017 15.8 (_MSC_VER >= 1915)
template <class SrcElementType, std::size_t SrcExtent, class DestElementType,
std::size_t DestExtent>
void copy(span<SrcElementType, SrcExtent> src, span<DestElementType, DestExtent> dest)
{
static_assert(std::is_assignable<decltype(*dest.data()), decltype(*src.data())>::value,
"Elements of source span can not be assigned to elements of destination span");
static_assert(SrcExtent == dynamic_extent || DestExtent == dynamic_extent ||
(SrcExtent <= DestExtent),
"Source range is longer than target range");
Expects(dest.size() >= src.size());
// clang-format off
GSL_SUPPRESS(stl.1) // NO-FORMAT: attribute
// clang-format on
std::copy_n(src.data(), src.size(), dest.data());
}
} // namespace gsl
#ifdef _MSC_VER
#pragma warning(pop)
#endif // _MSC_VER
#endif // GSL_ALGORITHM_H
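
The header above exposes a single algorithm, gsl::copy, which adds compile-time and run-time size checks on top of std::copy_n. A minimal usage sketch follows (hypothetical file name, not part of this commit; it assumes the GSL include directory is on the compiler's include path):

// copy_example.cpp -- hypothetical usage sketch for gsl::copy
#include <gsl/algorithm>
#include <gsl/span>
#include <array>
#include <iostream>

int main()
{
    std::array<int, 4> src{{1, 2, 3, 4}};
    std::array<int, 8> dst{};                             // destination may be larger than the source
    gsl::copy(gsl::span<int>{src}, gsl::span<int>{dst});  // checked by Expects(dest.size() >= src.size())
    std::cout << dst[3] << '\n';                          // prints 4
    return 0;
}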

135
deps/GSL/include/gsl/assert vendored Normal file

@@ -0,0 +1,135 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef GSL_CONTRACTS_H
#define GSL_CONTRACTS_H
//
// Temporary until MSVC STL supports no-exceptions mode.
// Currently terminate is a no-op in this mode, so we add termination behavior back
//
#if defined(_MSC_VER) && (defined(_KERNEL_MODE) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS))
#define GSL_MSVC_USE_STL_NOEXCEPTION_WORKAROUND
#include <intrin.h>
#define RANGE_CHECKS_FAILURE 0
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winvalid-noreturn"
#endif // defined(__clang__)
#else // defined(_MSC_VER) && (defined(_KERNEL_MODE) || (defined(_HAS_EXCEPTIONS) &&
// !_HAS_EXCEPTIONS))
#include <exception>
#endif // defined(_MSC_VER) && (defined(_KERNEL_MODE) || (defined(_HAS_EXCEPTIONS) &&
// !_HAS_EXCEPTIONS))
//
// make suppress attributes parse for some compilers
// Hopefully temporary until suppression standardization occurs
//
#if defined(__clang__)
#define GSL_SUPPRESS(x) [[gsl::suppress("x")]]
#else
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#define GSL_SUPPRESS(x) [[gsl::suppress(x)]]
#else
#define GSL_SUPPRESS(x)
#endif // _MSC_VER
#endif // __clang__
#define GSL_STRINGIFY_DETAIL(x) #x
#define GSL_STRINGIFY(x) GSL_STRINGIFY_DETAIL(x)
#if defined(__clang__) || defined(__GNUC__)
#define GSL_LIKELY(x) __builtin_expect(!!(x), 1)
#define GSL_UNLIKELY(x) __builtin_expect(!!(x), 0)
#else
#define GSL_LIKELY(x) (!!(x))
#define GSL_UNLIKELY(x) (!!(x))
#endif // defined(__clang__) || defined(__GNUC__)
//
// GSL_ASSUME(cond)
//
// Tell the optimizer that the predicate cond must hold. It is unspecified
// whether or not cond is actually evaluated.
//
#ifdef _MSC_VER
#define GSL_ASSUME(cond) __assume(cond)
#elif defined(__GNUC__)
#define GSL_ASSUME(cond) ((cond) ? static_cast<void>(0) : __builtin_unreachable())
#else
#define GSL_ASSUME(cond) static_cast<void>((cond) ? 0 : 0)
#endif
//
// GSL.assert: assertions
//
namespace gsl
{
namespace details
{
#if defined(GSL_MSVC_USE_STL_NOEXCEPTION_WORKAROUND)
typedef void(__cdecl* terminate_handler)();
// clang-format off
GSL_SUPPRESS(f.6) // NO-FORMAT: attribute
// clang-format on
[[noreturn]] inline void __cdecl default_terminate_handler()
{
__fastfail(RANGE_CHECKS_FAILURE);
}
inline gsl::details::terminate_handler& get_terminate_handler() noexcept
{
static terminate_handler handler = &default_terminate_handler;
return handler;
}
#endif // defined(GSL_MSVC_USE_STL_NOEXCEPTION_WORKAROUND)
[[noreturn]] inline void terminate() noexcept
{
#if defined(GSL_MSVC_USE_STL_NOEXCEPTION_WORKAROUND)
(*gsl::details::get_terminate_handler())();
#else
std::terminate();
#endif // defined(GSL_MSVC_USE_STL_NOEXCEPTION_WORKAROUND)
}
} // namespace details
} // namespace gsl
#define GSL_CONTRACT_CHECK(type, cond) \
(GSL_LIKELY(cond) ? static_cast<void>(0) : gsl::details::terminate())
#define Expects(cond) GSL_CONTRACT_CHECK("Precondition", cond)
#define Ensures(cond) GSL_CONTRACT_CHECK("Postcondition", cond)
#if defined(GSL_MSVC_USE_STL_NOEXCEPTION_WORKAROUND) && defined(__clang__)
#pragma clang diagnostic pop
#endif
#endif // GSL_CONTRACTS_H
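
A short sketch of how the Expects/Ensures macros defined above are meant to be used (hypothetical file name, not part of this commit):

// contracts_example.cpp -- hypothetical usage sketch for Expects/Ensures
#include <gsl/assert>
#include <vector>

int first_element(const std::vector<int>& v)
{
    Expects(!v.empty());          // precondition: a violation calls gsl::details::terminate()
    const int result = v.front();
    Ensures(result == v.front()); // postcondition, trivially true here, shown for illustration
    return result;
}

int main()
{
    const std::vector<int> v{42};
    return first_element(v) == 42 ? 0 : 1;
}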

213
deps/GSL/include/gsl/byte vendored Normal file

@@ -0,0 +1,213 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef GSL_BYTE_H
#define GSL_BYTE_H
//
// make suppress attributes work for some compilers
// Hopefully temporary until suppression standardization occurs
//
#if defined(__clang__)
#define GSL_SUPPRESS(x) [[gsl::suppress("x")]]
#else
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#define GSL_SUPPRESS(x) [[gsl::suppress(x)]]
#else
#define GSL_SUPPRESS(x)
#endif // _MSC_VER
#endif // __clang__
#include <type_traits>
// VS2017 15.8 added support for the __cpp_lib_byte definition
// To do: drop _HAS_STD_BYTE when support for pre 15.8 expires
#ifdef _MSC_VER
#pragma warning(push)
// Turn off MSVC /analyze rules that generate too much noise. TODO: fix in the tool.
#pragma warning(disable : 26493) // don't use c-style casts // TODO: MSVC suppression in templates
// does not always work
#ifndef GSL_USE_STD_BYTE
// this tests if we are under MSVC and the standard lib has std::byte and it is enabled
#if (defined(_HAS_STD_BYTE) && _HAS_STD_BYTE) || \
(defined(__cpp_lib_byte) && __cpp_lib_byte >= 201603)
#define GSL_USE_STD_BYTE 1
#else // (defined(_HAS_STD_BYTE) && _HAS_STD_BYTE) || (defined(__cpp_lib_byte) && __cpp_lib_byte >=
// 201603)
#define GSL_USE_STD_BYTE 0
#endif // (defined(_HAS_STD_BYTE) && _HAS_STD_BYTE) || (defined(__cpp_lib_byte) && __cpp_lib_byte >=
// 201603)
#endif // GSL_USE_STD_BYTE
#else // _MSC_VER
#ifndef GSL_USE_STD_BYTE
#include <cstddef> /* __cpp_lib_byte */
// this tests if we are under GCC or Clang with enough -std=c++1z power to get us std::byte
// also check if libc++ version is sufficient (> 5.0) or libstdc++ actually contains std::byte
#if defined(__cplusplus) && (__cplusplus >= 201703L) && \
(defined(__cpp_lib_byte) && (__cpp_lib_byte >= 201603) || \
defined(_LIBCPP_VERSION) && (_LIBCPP_VERSION >= 5000))
#define GSL_USE_STD_BYTE 1
#else // defined(__cplusplus) && (__cplusplus >= 201703L) &&
// (defined(__cpp_lib_byte) && (__cpp_lib_byte >= 201603) ||
// defined(_LIBCPP_VERSION) && (_LIBCPP_VERSION >= 5000))
#define GSL_USE_STD_BYTE 0
#endif // defined(__cplusplus) && (__cplusplus >= 201703L) &&
// (defined(__cpp_lib_byte) && (__cpp_lib_byte >= 201603) ||
// defined(_LIBCPP_VERSION) && (_LIBCPP_VERSION >= 5000))
#endif // GSL_USE_STD_BYTE
#endif // _MSC_VER
// Use __may_alias__ attribute on gcc and clang
#if defined __clang__ || (defined(__GNUC__) && __GNUC__ > 5)
#define byte_may_alias __attribute__((__may_alias__))
#else // defined __clang__ || defined __GNUC__
#define byte_may_alias
#endif // defined __clang__ || defined __GNUC__
#if GSL_USE_STD_BYTE
#include <cstddef>
#endif
namespace gsl
{
#if GSL_USE_STD_BYTE
using std::byte;
using std::to_integer;
#else // GSL_USE_STD_BYTE
// This is a simple definition for now that allows
// use of byte within span<> to be standards-compliant
enum class byte_may_alias byte : unsigned char
{
};
template <class IntegerType, class = std::enable_if_t<std::is_integral<IntegerType>::value>>
constexpr byte& operator<<=(byte& b, IntegerType shift) noexcept
{
return b = byte(static_cast<unsigned char>(b) << shift);
}
template <class IntegerType, class = std::enable_if_t<std::is_integral<IntegerType>::value>>
constexpr byte operator<<(byte b, IntegerType shift) noexcept
{
return byte(static_cast<unsigned char>(b) << shift);
}
template <class IntegerType, class = std::enable_if_t<std::is_integral<IntegerType>::value>>
constexpr byte& operator>>=(byte& b, IntegerType shift) noexcept
{
return b = byte(static_cast<unsigned char>(b) >> shift);
}
template <class IntegerType, class = std::enable_if_t<std::is_integral<IntegerType>::value>>
constexpr byte operator>>(byte b, IntegerType shift) noexcept
{
return byte(static_cast<unsigned char>(b) >> shift);
}
constexpr byte& operator|=(byte& l, byte r) noexcept
{
return l = byte(static_cast<unsigned char>(l) | static_cast<unsigned char>(r));
}
constexpr byte operator|(byte l, byte r) noexcept
{
return byte(static_cast<unsigned char>(l) | static_cast<unsigned char>(r));
}
constexpr byte& operator&=(byte& l, byte r) noexcept
{
return l = byte(static_cast<unsigned char>(l) & static_cast<unsigned char>(r));
}
constexpr byte operator&(byte l, byte r) noexcept
{
return byte(static_cast<unsigned char>(l) & static_cast<unsigned char>(r));
}
constexpr byte& operator^=(byte& l, byte r) noexcept
{
return l = byte(static_cast<unsigned char>(l) ^ static_cast<unsigned char>(r));
}
constexpr byte operator^(byte l, byte r) noexcept
{
return byte(static_cast<unsigned char>(l) ^ static_cast<unsigned char>(r));
}
constexpr byte operator~(byte b) noexcept { return byte(~static_cast<unsigned char>(b)); }
template <class IntegerType, class = std::enable_if_t<std::is_integral<IntegerType>::value>>
constexpr IntegerType to_integer(byte b) noexcept
{
return static_cast<IntegerType>(b);
}
#endif // GSL_USE_STD_BYTE
template <bool E, typename T>
constexpr byte to_byte_impl(T t) noexcept
{
static_assert(
E, "gsl::to_byte(t) must be provided an unsigned char, otherwise data loss may occur. "
"If you are calling to_byte with an integer contant use: gsl::to_byte<t>() version.");
return static_cast<byte>(t);
}
template <>
// NOTE: need suppression since c++14 does not allow "return {t}"
// GSL_SUPPRESS(type.4) // NO-FORMAT: attribute // TODO: suppression does not work
constexpr byte to_byte_impl<true, unsigned char>(unsigned char t) noexcept
{
return byte(t);
}
template <typename T>
constexpr byte to_byte(T t) noexcept
{
return to_byte_impl<std::is_same<T, unsigned char>::value, T>(t);
}
template <int I>
constexpr byte to_byte() noexcept
{
static_assert(I >= 0 && I <= 255,
"gsl::byte only has 8 bits of storage, values must be in range 0-255");
return static_cast<byte>(I);
}
} // namespace gsl
#ifdef _MSC_VER
#pragma warning(pop)
#endif // _MSC_VER
#endif // GSL_BYTE_H
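
A brief usage sketch for gsl::byte and the helpers defined above (hypothetical file name, not part of this commit; it behaves the same whether or not GSL_USE_STD_BYTE selects std::byte):

// byte_example.cpp -- hypothetical usage sketch for gsl::byte
#include <gsl/byte>

int main()
{
    gsl::byte b = gsl::to_byte<0x0F>();        // value checked at compile time (0-255)
    b <<= 4;                                   // shift and bitwise operators are defined for byte
    b |= gsl::to_byte<0x01>();
    const int value = gsl::to_integer<int>(b); // convert back to an arithmetic type
    return value == 0xF1 ? 0 : 1;              // (0x0F << 4) | 0x01 == 0xF1
}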

32
deps/GSL/include/gsl/gsl vendored Normal file

@@ -0,0 +1,32 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef GSL_GSL_H
#define GSL_GSL_H
#include <gsl/algorithm> // copy
#include <gsl/assert> // Ensures/Expects
#include <gsl/byte> // byte
#include <gsl/pointers> // owner, not_null
#include <gsl/span> // span
#include <gsl/string_span> // zstring, string_span, zstring_builder...
#include <gsl/util> // finally()/narrow_cast()...
#ifdef __cpp_exceptions
#include <gsl/narrow> // narrow()
#endif
#endif // GSL_GSL_H
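// Illustrative usage sketch (not part of the vendored header): pulling in the
// umbrella <gsl/gsl> header and touching a few of the facilities it re-exports.
// demo() is a hypothetical name used only for this sketch.
#include <gsl/gsl>
#include <cstdio>

int demo(gsl::span<const int> values)
{
    Expects(!values.empty());                                           // from <gsl/assert>
    const auto log_exit = gsl::finally([] { std::puts("demo done"); }); // from <gsl/util>
    return gsl::narrow_cast<int>(values.size());                        // from <gsl/util>
}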

3
deps/GSL/include/gsl/gsl_algorithm vendored Normal file
View File

@ -0,0 +1,3 @@
#pragma once
#pragma message("This header will soon be removed. Use <gsl/algorithm> instead of <gsl/gsl_algorithm>")
#include <gsl/algorithm>

3
deps/GSL/include/gsl/gsl_assert vendored Normal file
View File

@ -0,0 +1,3 @@
#pragma once
#pragma message("This header will soon be removed. Use <gsl/assert> instead of <gsl/gsl_assert>")
#include <gsl/assert>

3
deps/GSL/include/gsl/gsl_byte vendored Normal file
View File

@ -0,0 +1,3 @@
#pragma once
#pragma message("This header will soon be removed. Use <gsl/byte> instead of <gsl/gsl_byte>")
#include <gsl/byte>

3
deps/GSL/include/gsl/gsl_narrow vendored Normal file
View File

@ -0,0 +1,3 @@
#pragma once
#pragma message("This header will soon be removed. Use <gsl/narrow> instead of <gsl/gsl_narrow>")
#include <gsl/narrow>

3
deps/GSL/include/gsl/gsl_util vendored Normal file
View File

@ -0,0 +1,3 @@
#pragma once
#pragma message("This header will soon be removed. Use <gsl/util> instead of <gsl/gsl_util>")
#include <gsl/util>

49
deps/GSL/include/gsl/narrow vendored Normal file
View File

@ -0,0 +1,49 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef GSL_NARROW_H
#define GSL_NARROW_H
#include <gsl/assert> // for Expects
#include <gsl/util> // for narrow_cast
namespace gsl
{
struct narrowing_error : public std::exception
{
const char* what() const noexcept override { return "narrowing_error"; }
};
// narrow() : a checked version of narrow_cast() that throws if the cast changed the value
template <class T, class U>
// clang-format off
GSL_SUPPRESS(type.1) // NO-FORMAT: attribute
GSL_SUPPRESS(f.6) // NO-FORMAT: attribute // TODO: MSVC /analyze does not recognise noexcept(false)
// clang-format on
constexpr T narrow(U u) noexcept(false)
{
constexpr const bool is_different_signedness =
(std::is_signed<T>::value != std::is_signed<U>::value);
const T t = narrow_cast<T>(u);
if (static_cast<U>(t) != u || (is_different_signedness && ((t < T{}) != (u < U{}))))
{
throw narrowing_error{};
}
return t;
}
} // namespace gsl
#endif // GSL_NARROW_H
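// Illustrative usage sketch (not part of the vendored header): gsl::narrow()
// throws gsl::narrowing_error when a conversion would change the value, while
// narrow_cast() silently truncates. Assumes a 32-bit int; narrow_example() is
// a hypothetical name used only for this sketch.
#include <gsl/narrow>
#include <cassert>
#include <cstdint>

void narrow_example()
{
    const std::int64_t ok = 42;
    assert(gsl::narrow<int>(ok) == 42); // value preserved, no throw

    const std::int64_t too_big = 5'000'000'000LL; // does not fit in a 32-bit int
    bool threw = false;
    try { (void) gsl::narrow<int>(too_big); }
    catch (const gsl::narrowing_error&) { threw = true; }
    assert(threw);
}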

308
deps/GSL/include/gsl/pointers vendored Normal file
View File

@ -0,0 +1,308 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef GSL_POINTERS_H
#define GSL_POINTERS_H
#include <gsl/assert> // for Ensures, Expects
#include <algorithm> // for forward
#include <cstddef> // for ptrdiff_t, nullptr_t, size_t
#include <memory> // for shared_ptr, unique_ptr
#include <system_error> // for hash
#include <type_traits> // for enable_if_t, is_convertible, is_assignable
#if !defined(GSL_NO_IOSTREAMS)
#include <iosfwd> // for ostream
#endif // !defined(GSL_NO_IOSTREAMS)
namespace gsl
{
//
// GSL.owner: ownership pointers
//
using std::shared_ptr;
using std::unique_ptr;
//
// owner
//
// owner<T> is designed as a bridge for code that must deal directly with owning pointers for some
// reason
//
// T must be a pointer type
// - disallow construction from any type other than pointer type
//
template <class T, class = std::enable_if_t<std::is_pointer<T>::value>>
using owner = T;
//
// not_null
//
// Restricts a pointer or smart pointer to only hold non-null values.
//
// Has zero size overhead over T.
//
// If T is a pointer (i.e. T == U*) then
// - allow construction from U*
// - disallow construction from nullptr_t
// - disallow default construction
// - ensure construction from null U* fails
// - allow implicit conversion to U*
//
template <class T>
class not_null
{
public:
static_assert(std::is_convertible<decltype(std::declval<T>() != nullptr), bool>::value,
"T cannot be compared to nullptr.");
template <typename U, typename = std::enable_if_t<std::is_convertible<U, T>::value>>
constexpr not_null(U&& u) : ptr_(std::forward<U>(u))
{
Expects(ptr_ != nullptr);
}
template <typename = std::enable_if_t<!std::is_same<std::nullptr_t, T>::value>>
constexpr not_null(T u) : ptr_(std::move(u))
{
Expects(ptr_ != nullptr);
}
template <typename U, typename = std::enable_if_t<std::is_convertible<U, T>::value>>
constexpr not_null(const not_null<U>& other) : not_null(other.get())
{}
not_null(const not_null& other) = default;
not_null& operator=(const not_null& other) = default;
constexpr std::conditional_t<std::is_copy_constructible<T>::value, T, const T&> get() const
{
Ensures(ptr_ != nullptr);
return ptr_;
}
constexpr operator T() const { return get(); }
constexpr decltype(auto) operator->() const { return get(); }
constexpr decltype(auto) operator*() const { return *get(); }
// prevents compilation when someone attempts to assign a null pointer constant
not_null(std::nullptr_t) = delete;
not_null& operator=(std::nullptr_t) = delete;
// unwanted operators...pointers only point to single objects!
not_null& operator++() = delete;
not_null& operator--() = delete;
not_null operator++(int) = delete;
not_null operator--(int) = delete;
not_null& operator+=(std::ptrdiff_t) = delete;
not_null& operator-=(std::ptrdiff_t) = delete;
void operator[](std::ptrdiff_t) const = delete;
private:
T ptr_;
};
template <class T>
auto make_not_null(T&& t) noexcept
{
return not_null<std::remove_cv_t<std::remove_reference_t<T>>>{std::forward<T>(t)};
}
#if !defined(GSL_NO_IOSTREAMS)
template <class T>
std::ostream& operator<<(std::ostream& os, const not_null<T>& val)
{
os << val.get();
return os;
}
#endif // !defined(GSL_NO_IOSTREAMS)
template <class T, class U>
auto operator==(const not_null<T>& lhs,
const not_null<U>& rhs) noexcept(noexcept(lhs.get() == rhs.get()))
-> decltype(lhs.get() == rhs.get())
{
return lhs.get() == rhs.get();
}
template <class T, class U>
auto operator!=(const not_null<T>& lhs,
const not_null<U>& rhs) noexcept(noexcept(lhs.get() != rhs.get()))
-> decltype(lhs.get() != rhs.get())
{
return lhs.get() != rhs.get();
}
template <class T, class U>
auto operator<(const not_null<T>& lhs,
const not_null<U>& rhs) noexcept(noexcept(lhs.get() < rhs.get()))
-> decltype(lhs.get() < rhs.get())
{
return lhs.get() < rhs.get();
}
template <class T, class U>
auto operator<=(const not_null<T>& lhs,
const not_null<U>& rhs) noexcept(noexcept(lhs.get() <= rhs.get()))
-> decltype(lhs.get() <= rhs.get())
{
return lhs.get() <= rhs.get();
}
template <class T, class U>
auto operator>(const not_null<T>& lhs,
const not_null<U>& rhs) noexcept(noexcept(lhs.get() > rhs.get()))
-> decltype(lhs.get() > rhs.get())
{
return lhs.get() > rhs.get();
}
template <class T, class U>
auto operator>=(const not_null<T>& lhs,
const not_null<U>& rhs) noexcept(noexcept(lhs.get() >= rhs.get()))
-> decltype(lhs.get() >= rhs.get())
{
return lhs.get() >= rhs.get();
}
// more unwanted operators
template <class T, class U>
std::ptrdiff_t operator-(const not_null<T>&, const not_null<U>&) = delete;
template <class T>
not_null<T> operator-(const not_null<T>&, std::ptrdiff_t) = delete;
template <class T>
not_null<T> operator+(const not_null<T>&, std::ptrdiff_t) = delete;
template <class T>
not_null<T> operator+(std::ptrdiff_t, const not_null<T>&) = delete;
} // namespace gsl
namespace std
{
template <class T>
struct hash<gsl::not_null<T>>
{
std::size_t operator()(const gsl::not_null<T>& value) const { return hash<T>{}(value.get()); }
};
} // namespace std
namespace gsl
{
//
// strict_not_null
//
// Restricts a pointer or smart pointer to only hold non-null values,
//
// - provides a strict (i.e. explicit constructor from T) wrapper of not_null
// - to be used for new code that wishes the design to be cleaner and make not_null
// checks intentional, or in old code that would like to make the transition.
//
// To make the transition from not_null, incrementally replace not_null
// by strict_not_null and fix compilation errors
//
// Expect to
// - remove all unneeded conversions from raw pointer to not_null and back
// - make API clear by specifying not_null in parameters where needed
// - remove unnecessary asserts
//
template <class T>
class strict_not_null : public not_null<T>
{
public:
template <typename U, typename = std::enable_if_t<std::is_convertible<U, T>::value>>
constexpr explicit strict_not_null(U&& u) : not_null<T>(std::forward<U>(u))
{}
template <typename = std::enable_if_t<!std::is_same<std::nullptr_t, T>::value>>
constexpr explicit strict_not_null(T u) : not_null<T>(u)
{}
template <typename U, typename = std::enable_if_t<std::is_convertible<U, T>::value>>
constexpr strict_not_null(const not_null<U>& other) : not_null<T>(other)
{}
template <typename U, typename = std::enable_if_t<std::is_convertible<U, T>::value>>
constexpr strict_not_null(const strict_not_null<U>& other) : not_null<T>(other)
{}
strict_not_null(strict_not_null&& other) = default;
strict_not_null(const strict_not_null& other) = default;
strict_not_null& operator=(const strict_not_null& other) = default;
strict_not_null& operator=(const not_null<T>& other)
{
not_null<T>::operator=(other);
return *this;
}
// prevents compilation when someone attempts to assign a null pointer constant
strict_not_null(std::nullptr_t) = delete;
strict_not_null& operator=(std::nullptr_t) = delete;
// unwanted operators...pointers only point to single objects!
strict_not_null& operator++() = delete;
strict_not_null& operator--() = delete;
strict_not_null operator++(int) = delete;
strict_not_null operator--(int) = delete;
strict_not_null& operator+=(std::ptrdiff_t) = delete;
strict_not_null& operator-=(std::ptrdiff_t) = delete;
void operator[](std::ptrdiff_t) const = delete;
};
// more unwanted operators
template <class T, class U>
std::ptrdiff_t operator-(const strict_not_null<T>&, const strict_not_null<U>&) = delete;
template <class T>
strict_not_null<T> operator-(const strict_not_null<T>&, std::ptrdiff_t) = delete;
template <class T>
strict_not_null<T> operator+(const strict_not_null<T>&, std::ptrdiff_t) = delete;
template <class T>
strict_not_null<T> operator+(std::ptrdiff_t, const strict_not_null<T>&) = delete;
template <class T>
auto make_strict_not_null(T&& t) noexcept
{
return strict_not_null<std::remove_cv_t<std::remove_reference_t<T>>>{std::forward<T>(t)};
}
#if (defined(__cpp_deduction_guides) && (__cpp_deduction_guides >= 201611L))
// deduction guides to prevent the ctad-maybe-unsupported warning
template <class T>
not_null(T) -> not_null<T>;
template <class T>
strict_not_null(T) -> strict_not_null<T>;
#endif // ( defined(__cpp_deduction_guides) && (__cpp_deduction_guides >= 201611L) )
} // namespace gsl
namespace std
{
template <class T>
struct hash<gsl::strict_not_null<T>>
{
std::size_t operator()(const gsl::strict_not_null<T>& value) const
{
return hash<T>{}(value.get());
}
};
} // namespace std
#endif // GSL_POINTERS_H
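// Illustrative usage sketch (not part of the vendored header): not_null
// documents and enforces (via Expects) that a pointer is never null, and
// strict_not_null requires explicit construction. widget, use_widget() and
// pointers_example() are hypothetical names used only for this sketch.
#include <gsl/pointers>
#include <memory>

struct widget { int id; };

int use_widget(gsl::not_null<widget*> w) // callee may rely on w != nullptr
{
    return w->id;
}

void pointers_example()
{
    widget x{7};
    use_widget(&x);                                           // implicit construction from widget*
    auto sp = gsl::make_not_null(std::make_shared<widget>()); // works with smart pointers too
    gsl::strict_not_null<widget*> s{&x};                      // explicit by design
    (void) sp; (void) s;
}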

826
deps/GSL/include/gsl/span vendored Normal file
View File

@ -0,0 +1,826 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef GSL_SPAN_H
#define GSL_SPAN_H
#include <gsl/assert> // for Expects
#include <gsl/byte> // for byte
#include <gsl/util> // for narrow_cast
#include <array> // for array
#include <cstddef> // for ptrdiff_t, size_t, nullptr_t
#include <iterator> // for reverse_iterator, distance, random_access_...
#include <type_traits> // for enable_if_t, declval, is_convertible, inte...
#if defined(_MSC_VER) && !defined(__clang__)
#pragma warning(push)
// turn off some warnings that are noisy about our Expects statements
#pragma warning(disable : 4127) // conditional expression is constant
#pragma warning( \
disable : 4146) // unary minus operator applied to unsigned type, result still unsigned
#pragma warning(disable : 4702) // unreachable code
// Turn off MSVC /analyze rules that generate too much noise. TODO: fix in the tool.
#pragma warning(disable : 26495) // uninitialized member when constructor calls constructor
#pragma warning(disable : 26446) // parser bug does not allow attributes on some templates
#endif // _MSC_VER
// See if we have enough C++17 power to use a static constexpr data member
// without needing an out-of-line definition
#if !(defined(__cplusplus) && (__cplusplus >= 201703L))
#define GSL_USE_STATIC_CONSTEXPR_WORKAROUND
#endif // !(defined(__cplusplus) && (__cplusplus >= 201703L))
// GCC 7 does not like the signed/unsigned mismatch (size_t vs. ptrdiff_t).
// While there is a conversion from signed to unsigned, it happens at
// compile time, so the compiler wouldn't have to warn indiscriminately, but
// could check if the source value actually doesn't fit into the target type
// and only warn in those cases.
#if defined(__GNUC__) && __GNUC__ > 6
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#endif
namespace gsl
{
// [views.constants], constants
constexpr const std::size_t dynamic_extent = narrow_cast<std::size_t>(-1);
template <class ElementType, std::size_t Extent = dynamic_extent>
class span;
// implementation details
namespace details
{
template <class T>
struct is_span_oracle : std::false_type
{
};
template <class ElementType, std::size_t Extent>
struct is_span_oracle<gsl::span<ElementType, Extent>> : std::true_type
{
};
template <class T>
struct is_span : public is_span_oracle<std::remove_cv_t<T>>
{
};
template <class T>
struct is_std_array_oracle : std::false_type
{
};
template <class ElementType, std::size_t Extent>
struct is_std_array_oracle<std::array<ElementType, Extent>> : std::true_type
{
};
template <class T>
struct is_std_array : is_std_array_oracle<std::remove_cv_t<T>>
{
};
template <std::size_t From, std::size_t To>
struct is_allowed_extent_conversion
: std::integral_constant<bool, From == To || To == dynamic_extent>
{
};
template <class From, class To>
struct is_allowed_element_type_conversion
: std::integral_constant<bool, std::is_convertible<From (*)[], To (*)[]>::value>
{
};
template <class Type>
class span_iterator
{
public:
using iterator_category = std::random_access_iterator_tag;
using value_type = std::remove_cv_t<Type>;
using difference_type = std::ptrdiff_t;
using pointer = Type*;
using reference = Type&;
#ifdef _MSC_VER
using _Unchecked_type = pointer;
#endif // _MSC_VER
constexpr span_iterator() = default;
constexpr span_iterator(pointer begin, pointer end, pointer current)
: begin_(begin), end_(end), current_(current)
{}
constexpr operator span_iterator<const Type>() const noexcept
{
return {begin_, end_, current_};
}
constexpr reference operator*() const noexcept
{
Expects(begin_ && end_);
Expects(begin_ <= current_ && current_ < end_);
return *current_;
}
constexpr pointer operator->() const noexcept
{
Expects(begin_ && end_);
Expects(begin_ <= current_ && current_ < end_);
return current_;
}
constexpr span_iterator& operator++() noexcept
{
Expects(begin_ && current_ && end_);
Expects(current_ < end_);
// clang-format off
GSL_SUPPRESS(bounds.1) // NO-FORMAT: attribute
// clang-format on
++current_;
return *this;
}
constexpr span_iterator operator++(int) noexcept
{
span_iterator ret = *this;
++*this;
return ret;
}
constexpr span_iterator& operator--() noexcept
{
Expects(begin_ && end_);
Expects(begin_ < current_);
--current_;
return *this;
}
constexpr span_iterator operator--(int) noexcept
{
span_iterator ret = *this;
--*this;
return ret;
}
constexpr span_iterator& operator+=(const difference_type n) noexcept
{
if (n != 0) Expects(begin_ && current_ && end_);
if (n > 0) Expects(end_ - current_ >= n);
if (n < 0) Expects(current_ - begin_ >= -n);
// clang-format off
GSL_SUPPRESS(bounds.1) // NO-FORMAT: attribute
// clang-format on
current_ += n;
return *this;
}
constexpr span_iterator operator+(const difference_type n) const noexcept
{
span_iterator ret = *this;
ret += n;
return ret;
}
friend constexpr span_iterator operator+(const difference_type n,
const span_iterator& rhs) noexcept
{
return rhs + n;
}
constexpr span_iterator& operator-=(const difference_type n) noexcept
{
if (n != 0) Expects(begin_ && current_ && end_);
if (n > 0) Expects(current_ - begin_ >= n);
if (n < 0) Expects(end_ - current_ >= -n);
current_ -= n;
return *this;
}
constexpr span_iterator operator-(const difference_type n) const noexcept
{
span_iterator ret = *this;
ret -= n;
return ret;
}
template <
class Type2,
std::enable_if_t<std::is_same<std::remove_cv_t<Type2>, value_type>::value, int> = 0>
constexpr difference_type operator-(const span_iterator<Type2>& rhs) const noexcept
{
Expects(begin_ == rhs.begin_ && end_ == rhs.end_);
return current_ - rhs.current_;
}
constexpr reference operator[](const difference_type n) const noexcept
{
return *(*this + n);
}
template <
class Type2,
std::enable_if_t<std::is_same<std::remove_cv_t<Type2>, value_type>::value, int> = 0>
constexpr bool operator==(const span_iterator<Type2>& rhs) const noexcept
{
Expects(begin_ == rhs.begin_ && end_ == rhs.end_);
return current_ == rhs.current_;
}
template <
class Type2,
std::enable_if_t<std::is_same<std::remove_cv_t<Type2>, value_type>::value, int> = 0>
constexpr bool operator!=(const span_iterator<Type2>& rhs) const noexcept
{
return !(*this == rhs);
}
template <
class Type2,
std::enable_if_t<std::is_same<std::remove_cv_t<Type2>, value_type>::value, int> = 0>
constexpr bool operator<(const span_iterator<Type2>& rhs) const noexcept
{
Expects(begin_ == rhs.begin_ && end_ == rhs.end_);
return current_ < rhs.current_;
}
template <
class Type2,
std::enable_if_t<std::is_same<std::remove_cv_t<Type2>, value_type>::value, int> = 0>
constexpr bool operator>(const span_iterator<Type2>& rhs) const noexcept
{
return rhs < *this;
}
template <
class Type2,
std::enable_if_t<std::is_same<std::remove_cv_t<Type2>, value_type>::value, int> = 0>
constexpr bool operator<=(const span_iterator<Type2>& rhs) const noexcept
{
return !(rhs < *this);
}
template <
class Type2,
std::enable_if_t<std::is_same<std::remove_cv_t<Type2>, value_type>::value, int> = 0>
constexpr bool operator>=(const span_iterator<Type2>& rhs) const noexcept
{
return !(*this < rhs);
}
#ifdef _MSC_VER
// MSVC++ iterator debugging support; allows STL algorithms in 15.8+
// to unwrap span_iterator to a pointer type after a range check in STL
// algorithm calls
friend constexpr void _Verify_range(span_iterator lhs, span_iterator rhs) noexcept
{ // test that [lhs, rhs) forms a valid range inside an STL algorithm
Expects(lhs.begin_ == rhs.begin_ // range spans have to match
&& lhs.end_ == rhs.end_ &&
lhs.current_ <= rhs.current_); // range must not be transposed
}
constexpr void _Verify_offset(const difference_type n) const noexcept
{ // test that *this + n is within the range of this call
if (n != 0) Expects(begin_ && current_ && end_);
if (n > 0) Expects(end_ - current_ >= n);
if (n < 0) Expects(current_ - begin_ >= -n);
}
// clang-format off
GSL_SUPPRESS(bounds.1) // NO-FORMAT: attribute
// clang-format on
constexpr pointer _Unwrapped() const noexcept
{ // after seeking *this to a high water mark, or using one of the
// _Verify_xxx functions above, unwrap this span_iterator to a raw
// pointer
return current_;
}
// Tell the STL that span_iterator should not be unwrapped if it can't
// validate in advance, even in release / optimized builds:
#if defined(GSL_USE_STATIC_CONSTEXPR_WORKAROUND)
static constexpr const bool _Unwrap_when_unverified = false;
#else
static constexpr bool _Unwrap_when_unverified = false;
#endif
// clang-format off
GSL_SUPPRESS(con.3) // NO-FORMAT: attribute // TODO: false positive
// clang-format on
constexpr void _Seek_to(const pointer p) noexcept
{ // adjust the position of *this to previously verified location p
// after _Unwrapped
current_ = p;
}
#endif
pointer begin_ = nullptr;
pointer end_ = nullptr;
pointer current_ = nullptr;
};
template <std::size_t Ext>
class extent_type
{
public:
using size_type = std::size_t;
constexpr extent_type() noexcept = default;
constexpr explicit extent_type(extent_type<dynamic_extent>);
constexpr explicit extent_type(size_type size) { Expects(size == Ext); }
constexpr size_type size() const noexcept { return Ext; }
private:
#if defined(GSL_USE_STATIC_CONSTEXPR_WORKAROUND)
static constexpr const size_type size_ = Ext; // static size equal to Ext
#else
static constexpr size_type size_ = Ext; // static size equal to Ext
#endif
};
template <>
class extent_type<dynamic_extent>
{
public:
using size_type = std::size_t;
template <size_type Other>
constexpr explicit extent_type(extent_type<Other> ext) : size_(ext.size())
{}
constexpr explicit extent_type(size_type size) : size_(size)
{
Expects(size != dynamic_extent);
}
constexpr size_type size() const noexcept { return size_; }
private:
size_type size_;
};
template <std::size_t Ext>
constexpr extent_type<Ext>::extent_type(extent_type<dynamic_extent> ext)
{
Expects(ext.size() == Ext);
}
template <class ElementType, std::size_t Extent, std::size_t Offset, std::size_t Count>
struct calculate_subspan_type
{
using type = span<ElementType, Count != dynamic_extent
? Count
: (Extent != dynamic_extent ? Extent - Offset : Extent)>;
};
} // namespace details
// [span], class template span
template <class ElementType, std::size_t Extent>
class span
{
public:
// constants and types
using element_type = ElementType;
using value_type = std::remove_cv_t<ElementType>;
using size_type = std::size_t;
using pointer = element_type*;
using const_pointer = const element_type*;
using reference = element_type&;
using const_reference = const element_type&;
using difference_type = std::ptrdiff_t;
using iterator = details::span_iterator<ElementType>;
using reverse_iterator = std::reverse_iterator<iterator>;
#if defined(GSL_USE_STATIC_CONSTEXPR_WORKAROUND)
static constexpr const size_type extent{Extent};
#else
static constexpr size_type extent{Extent};
#endif
// [span.cons], span constructors, copy, assignment, and destructor
template <bool Dependent = false,
// "Dependent" is needed to make "std::enable_if_t<Dependent || Extent == 0 || Extent
// == dynamic_extent>" SFINAE, since "std::enable_if_t<Extent == 0 || Extent ==
// dynamic_extent>" is ill-formed when Extent is greater than 0.
class = std::enable_if_t<(Dependent ||
details::is_allowed_extent_conversion<0, Extent>::value)>>
constexpr span() noexcept : storage_(nullptr, details::extent_type<0>())
{}
template <std::size_t MyExtent = Extent, std::enable_if_t<MyExtent != dynamic_extent, int> = 0>
constexpr explicit span(pointer ptr, size_type count) noexcept : storage_(ptr, count)
{
Expects(count == Extent);
}
template <std::size_t MyExtent = Extent, std::enable_if_t<MyExtent == dynamic_extent, int> = 0>
constexpr span(pointer ptr, size_type count) noexcept : storage_(ptr, count)
{}
template <std::size_t MyExtent = Extent, std::enable_if_t<MyExtent != dynamic_extent, int> = 0>
constexpr explicit span(pointer firstElem, pointer lastElem) noexcept
: storage_(firstElem, narrow_cast<std::size_t>(lastElem - firstElem))
{
Expects(lastElem - firstElem == static_cast<difference_type>(Extent));
}
template <std::size_t MyExtent = Extent, std::enable_if_t<MyExtent == dynamic_extent, int> = 0>
constexpr span(pointer firstElem, pointer lastElem) noexcept
: storage_(firstElem, narrow_cast<std::size_t>(lastElem - firstElem))
{}
template <std::size_t N,
std::enable_if_t<details::is_allowed_extent_conversion<N, Extent>::value, int> = 0>
constexpr span(element_type (&arr)[N]) noexcept
: storage_(KnownNotNull{arr}, details::extent_type<N>())
{}
template <
class T, std::size_t N,
std::enable_if_t<(details::is_allowed_extent_conversion<N, Extent>::value &&
details::is_allowed_element_type_conversion<T, element_type>::value),
int> = 0>
constexpr span(std::array<T, N>& arr) noexcept
: storage_(KnownNotNull{arr.data()}, details::extent_type<N>())
{}
template <class T, std::size_t N,
std::enable_if_t<
(details::is_allowed_extent_conversion<N, Extent>::value &&
details::is_allowed_element_type_conversion<const T, element_type>::value),
int> = 0>
constexpr span(const std::array<T, N>& arr) noexcept
: storage_(KnownNotNull{arr.data()}, details::extent_type<N>())
{}
// NB: the SFINAE on these constructors uses .data() as an incomplete/imperfect proxy for the
// requirement on Container to be a contiguous sequence container.
template <std::size_t MyExtent = Extent, class Container,
std::enable_if_t<
MyExtent != dynamic_extent && !details::is_span<Container>::value &&
!details::is_std_array<Container>::value &&
std::is_pointer<decltype(std::declval<Container&>().data())>::value &&
std::is_convertible<
std::remove_pointer_t<decltype(std::declval<Container&>().data())> (*)[],
element_type (*)[]>::value,
int> = 0>
constexpr explicit span(Container& cont) noexcept : span(cont.data(), cont.size())
{}
template <std::size_t MyExtent = Extent, class Container,
std::enable_if_t<
MyExtent == dynamic_extent && !details::is_span<Container>::value &&
!details::is_std_array<Container>::value &&
std::is_pointer<decltype(std::declval<Container&>().data())>::value &&
std::is_convertible<
std::remove_pointer_t<decltype(std::declval<Container&>().data())> (*)[],
element_type (*)[]>::value,
int> = 0>
constexpr span(Container& cont) noexcept : span(cont.data(), cont.size())
{}
template <
std::size_t MyExtent = Extent, class Container,
std::enable_if_t<
MyExtent != dynamic_extent && std::is_const<element_type>::value &&
!details::is_span<Container>::value && !details::is_std_array<Container>::value &&
std::is_pointer<decltype(std::declval<const Container&>().data())>::value &&
std::is_convertible<
std::remove_pointer_t<decltype(std::declval<const Container&>().data())> (*)[],
element_type (*)[]>::value,
int> = 0>
constexpr explicit span(const Container& cont) noexcept : span(cont.data(), cont.size())
{}
template <
std::size_t MyExtent = Extent, class Container,
std::enable_if_t<
MyExtent == dynamic_extent && std::is_const<element_type>::value &&
!details::is_span<Container>::value && !details::is_std_array<Container>::value &&
std::is_pointer<decltype(std::declval<const Container&>().data())>::value &&
std::is_convertible<
std::remove_pointer_t<decltype(std::declval<const Container&>().data())> (*)[],
element_type (*)[]>::value,
int> = 0>
constexpr span(const Container& cont) noexcept : span(cont.data(), cont.size())
{}
constexpr span(const span& other) noexcept = default;
template <class OtherElementType, std::size_t OtherExtent, std::size_t MyExtent = Extent,
std::enable_if_t<(MyExtent == dynamic_extent || MyExtent == OtherExtent) &&
details::is_allowed_element_type_conversion<OtherElementType,
element_type>::value,
int> = 0>
constexpr span(const span<OtherElementType, OtherExtent>& other) noexcept
: storage_(other.data(), details::extent_type<OtherExtent>(other.size()))
{}
template <class OtherElementType, std::size_t OtherExtent, std::size_t MyExtent = Extent,
std::enable_if_t<MyExtent != dynamic_extent && OtherExtent == dynamic_extent &&
details::is_allowed_element_type_conversion<OtherElementType,
element_type>::value,
int> = 0>
constexpr explicit span(const span<OtherElementType, OtherExtent>& other) noexcept
: storage_(other.data(), details::extent_type<OtherExtent>(other.size()))
{}
~span() noexcept = default;
constexpr span& operator=(const span& other) noexcept = default;
// [span.sub], span subviews
template <std::size_t Count>
constexpr span<element_type, Count> first() const noexcept
{
Expects(Count <= size());
return span<element_type, Count>{data(), Count};
}
template <std::size_t Count>
// clang-format off
GSL_SUPPRESS(bounds.1) // NO-FORMAT: attribute
// clang-format on
constexpr span<element_type, Count> last() const noexcept
{
Expects(Count <= size());
return span<element_type, Count>{data() + (size() - Count), Count};
}
template <std::size_t Offset, std::size_t Count = dynamic_extent>
// clang-format off
GSL_SUPPRESS(bounds.1) // NO-FORMAT: attribute
// clang-format on
constexpr auto subspan() const noexcept ->
typename details::calculate_subspan_type<ElementType, Extent, Offset, Count>::type
{
Expects((size() >= Offset) && (Count == dynamic_extent || (Count <= size() - Offset)));
using type =
typename details::calculate_subspan_type<ElementType, Extent, Offset, Count>::type;
return type{data() + Offset, Count == dynamic_extent ? size() - Offset : Count};
}
constexpr span<element_type, dynamic_extent> first(size_type count) const noexcept
{
Expects(count <= size());
return {data(), count};
}
constexpr span<element_type, dynamic_extent> last(size_type count) const noexcept
{
Expects(count <= size());
return make_subspan(size() - count, dynamic_extent, subspan_selector<Extent>{});
}
constexpr span<element_type, dynamic_extent>
subspan(size_type offset, size_type count = dynamic_extent) const noexcept
{
return make_subspan(offset, count, subspan_selector<Extent>{});
}
// [span.obs], span observers
constexpr size_type size() const noexcept { return storage_.size(); }
constexpr size_type size_bytes() const noexcept
{
Expects(size() < dynamic_extent / sizeof(element_type));
return size() * sizeof(element_type);
}
constexpr bool empty() const noexcept { return size() == 0; }
// [span.elem], span element access
// clang-format off
GSL_SUPPRESS(bounds.1) // NO-FORMAT: attribute
// clang-format on
constexpr reference operator[](size_type idx) const noexcept
{
Expects(idx < size());
return data()[idx];
}
constexpr reference front() const noexcept
{
Expects(size() > 0);
return data()[0];
}
constexpr reference back() const noexcept
{
Expects(size() > 0);
return data()[size() - 1];
}
constexpr pointer data() const noexcept { return storage_.data(); }
// [span.iter], span iterator support
constexpr iterator begin() const noexcept
{
const auto data = storage_.data();
// clang-format off
GSL_SUPPRESS(bounds.1) // NO-FORMAT: attribute
// clang-format on
return {data, data + size(), data};
}
constexpr iterator end() const noexcept
{
const auto data = storage_.data();
// clang-format off
GSL_SUPPRESS(bounds.1) // NO-FORMAT: attribute
// clang-format on
const auto endData = data + storage_.size();
return {data, endData, endData};
}
constexpr reverse_iterator rbegin() const noexcept { return reverse_iterator{end()}; }
constexpr reverse_iterator rend() const noexcept { return reverse_iterator{begin()}; }
#ifdef _MSC_VER
// Tell MSVC how to unwrap spans in range-based-for
constexpr pointer _Unchecked_begin() const noexcept { return data(); }
constexpr pointer _Unchecked_end() const noexcept
{
// clang-format off
GSL_SUPPRESS(bounds.1) // NO-FORMAT: attribute
// clang-format on
return data() + size();
}
#endif // _MSC_VER
private:
// Needed to remove unnecessary null check in subspans
struct KnownNotNull
{
pointer p;
};
// this implementation detail class lets us take advantage of the
// empty base class optimization to pay for only storage of a single
// pointer in the case of fixed-size spans
template <class ExtentType>
class storage_type : public ExtentType
{
public:
// KnownNotNull parameter is needed to remove unnecessary null check
// in subspans and constructors from arrays
template <class OtherExtentType>
constexpr storage_type(KnownNotNull data, OtherExtentType ext)
: ExtentType(ext), data_(data.p)
{
Expects(ExtentType::size() != dynamic_extent);
}
template <class OtherExtentType>
constexpr storage_type(pointer data, OtherExtentType ext) : ExtentType(ext), data_(data)
{
Expects(ExtentType::size() != dynamic_extent);
Expects(data || ExtentType::size() == 0);
}
constexpr pointer data() const noexcept { return data_; }
private:
pointer data_;
};
storage_type<details::extent_type<Extent>> storage_;
// The rest is needed to remove unnecessary null check
// in subspans and constructors from arrays
constexpr span(KnownNotNull ptr, size_type count) noexcept : storage_(ptr, count) {}
template <std::size_t CallerExtent>
class subspan_selector
{
};
template <std::size_t CallerExtent>
constexpr span<element_type, dynamic_extent>
make_subspan(size_type offset, size_type count, subspan_selector<CallerExtent>) const noexcept
{
const span<element_type, dynamic_extent> tmp(*this);
return tmp.subspan(offset, count);
}
// clang-format off
GSL_SUPPRESS(bounds.1) // NO-FORMAT: attribute
// clang-format on
constexpr span<element_type, dynamic_extent>
make_subspan(size_type offset, size_type count, subspan_selector<dynamic_extent>) const noexcept
{
Expects(size() >= offset);
if (count == dynamic_extent) { return {KnownNotNull{data() + offset}, size() - offset}; }
Expects(size() - offset >= count);
return {KnownNotNull{data() + offset}, count};
}
};
#if (defined(__cpp_deduction_guides) && (__cpp_deduction_guides >= 201611L))
// Deduction Guides
template <class Type, std::size_t Extent>
span(Type (&)[Extent]) -> span<Type, Extent>;
template <class Type, std::size_t Size>
span(std::array<Type, Size>&) -> span<Type, Size>;
template <class Type, std::size_t Size>
span(const std::array<Type, Size>&) -> span<const Type, Size>;
template <class Container,
class Element = std::remove_pointer_t<decltype(std::declval<Container&>().data())>>
span(Container&) -> span<Element>;
template <class Container,
class Element = std::remove_pointer_t<decltype(std::declval<const Container&>().data())>>
span(const Container&) -> span<Element>;
#endif // ( defined(__cpp_deduction_guides) && (__cpp_deduction_guides >= 201611L) )
#if defined(GSL_USE_STATIC_CONSTEXPR_WORKAROUND)
template <class ElementType, std::size_t Extent>
constexpr const typename span<ElementType, Extent>::size_type span<ElementType, Extent>::extent;
#endif
namespace details
{
// if we only supported compilers with good constexpr support then
// this pair of classes could collapse down to a constexpr function
// we should use a narrow_cast<> to go to std::size_t, but older compilers may not see it as
// constexpr
// and so will fail compilation of the template
template <class ElementType, std::size_t Extent>
struct calculate_byte_size : std::integral_constant<std::size_t, sizeof(ElementType) * Extent>
{
static_assert(Extent < dynamic_extent / sizeof(ElementType), "Size is too big.");
};
template <class ElementType>
struct calculate_byte_size<ElementType, dynamic_extent>
: std::integral_constant<std::size_t, dynamic_extent>
{
};
} // namespace details
// [span.objectrep], views of object representation
template <class ElementType, std::size_t Extent>
span<const byte, details::calculate_byte_size<ElementType, Extent>::value>
as_bytes(span<ElementType, Extent> s) noexcept
{
using type = span<const byte, details::calculate_byte_size<ElementType, Extent>::value>;
// clang-format off
GSL_SUPPRESS(type.1) // NO-FORMAT: attribute
// clang-format on
return type{reinterpret_cast<const byte*>(s.data()), s.size_bytes()};
}
template <class ElementType, std::size_t Extent,
std::enable_if_t<!std::is_const<ElementType>::value, int> = 0>
span<byte, details::calculate_byte_size<ElementType, Extent>::value>
as_writable_bytes(span<ElementType, Extent> s) noexcept
{
using type = span<byte, details::calculate_byte_size<ElementType, Extent>::value>;
// clang-format off
GSL_SUPPRESS(type.1) // NO-FORMAT: attribute
// clang-format on
return type{reinterpret_cast<byte*>(s.data()), s.size_bytes()};
}
} // namespace gsl
#if defined(_MSC_VER) && !defined(__clang__)
#pragma warning(pop)
#endif // _MSC_VER
#if defined(__GNUC__) && __GNUC__ > 6
#pragma GCC diagnostic pop
#endif // __GNUC__ > 6
#endif // GSL_SPAN_H
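// Illustrative usage sketch (not part of the vendored header): constructing
// spans from an array and a std::vector, taking a subspan, and viewing the
// bytes. sum() and span_example() are hypothetical names used only for this
// sketch.
#include <gsl/span>
#include <numeric>
#include <vector>

int sum(gsl::span<const int> s)
{
    return std::accumulate(s.begin(), s.end(), 0);
}

void span_example()
{
    int arr[] = {1, 2, 3, 4};
    std::vector<int> vec = {5, 6, 7, 8};

    gsl::span<int, 4> fixed{arr}; // fixed extent, checked against the array size
    gsl::span<int> dynamic{vec};  // dynamic extent from a contiguous container

    (void) sum(fixed);                // converts to span<const int>
    auto tail = dynamic.subspan(1);   // views {6, 7, 8}
    auto bytes = gsl::as_bytes(tail); // span<const gsl::byte>
    (void) bytes;
}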

197
deps/GSL/include/gsl/span_ext vendored Normal file
View File

@ -0,0 +1,197 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef GSL_SPAN_EXT_H
#define GSL_SPAN_EXT_H
///////////////////////////////////////////////////////////////////////////////
//
// File: span_ext
// Purpose: continue offering features that have been cut from the official
// implementation of span.
// While modernizing gsl::span a number of features needed to be removed to
// be compliant with the design of std::span
//
///////////////////////////////////////////////////////////////////////////////
#include <gsl/span> // for span
#include <gsl/util> // for narrow_cast, narrow
#include <algorithm> // for lexicographical_compare
#include <cstddef> // for ptrdiff_t, size_t
#include <utility>
namespace gsl
{
// [span.comparison], span comparison operators
template <class ElementType, std::size_t FirstExtent, std::size_t SecondExtent>
constexpr bool operator==(span<ElementType, FirstExtent> l, span<ElementType, SecondExtent> r)
{
return std::equal(l.begin(), l.end(), r.begin(), r.end());
}
template <class ElementType, std::size_t Extent>
constexpr bool operator!=(span<ElementType, Extent> l, span<ElementType, Extent> r)
{
return !(l == r);
}
template <class ElementType, std::size_t Extent>
constexpr bool operator<(span<ElementType, Extent> l, span<ElementType, Extent> r)
{
return std::lexicographical_compare(l.begin(), l.end(), r.begin(), r.end());
}
template <class ElementType, std::size_t Extent>
constexpr bool operator<=(span<ElementType, Extent> l, span<ElementType, Extent> r)
{
return !(l > r);
}
template <class ElementType, std::size_t Extent>
constexpr bool operator>(span<ElementType, Extent> l, span<ElementType, Extent> r)
{
return r < l;
}
template <class ElementType, std::size_t Extent>
constexpr bool operator>=(span<ElementType, Extent> l, span<ElementType, Extent> r)
{
return !(l < r);
}
//
// make_span() - Utility functions for creating spans
//
template <class ElementType>
constexpr span<ElementType> make_span(ElementType* ptr, typename span<ElementType>::size_type count)
{
return span<ElementType>(ptr, count);
}
template <class ElementType>
constexpr span<ElementType> make_span(ElementType* firstElem, ElementType* lastElem)
{
return span<ElementType>(firstElem, lastElem);
}
template <class ElementType, std::size_t N>
constexpr span<ElementType, N> make_span(ElementType (&arr)[N]) noexcept
{
return span<ElementType, N>(arr);
}
template <class Container>
constexpr span<typename Container::value_type> make_span(Container& cont)
{
return span<typename Container::value_type>(cont);
}
template <class Container>
constexpr span<const typename Container::value_type> make_span(const Container& cont)
{
return span<const typename Container::value_type>(cont);
}
template <class Ptr>
constexpr span<typename Ptr::element_type> make_span(Ptr& cont, std::size_t count)
{
return span<typename Ptr::element_type>(cont, count);
}
template <class Ptr>
constexpr span<typename Ptr::element_type> make_span(Ptr& cont)
{
return span<typename Ptr::element_type>(cont);
}
// Specialization of gsl::at for span
template <class ElementType, std::size_t Extent>
constexpr ElementType& at(span<ElementType, Extent> s, index i)
{
// No bounds checking here because it is done in span::operator[] called below
Ensures(i >= 0);
return s[narrow_cast<std::size_t>(i)];
}
// [span.obs] Free observer functions
template <class ElementType, std::size_t Extent>
constexpr std::ptrdiff_t ssize(const span<ElementType, Extent>& s) noexcept
{
return static_cast<std::ptrdiff_t>(s.size());
}
// [span.iter] Free functions for begin/end functions
template <class ElementType, std::size_t Extent>
constexpr typename span<ElementType, Extent>::iterator
begin(const span<ElementType, Extent>& s) noexcept
{
return s.begin();
}
template <class ElementType, std::size_t Extent = dynamic_extent>
constexpr typename span<ElementType, Extent>::iterator
end(const span<ElementType, Extent>& s) noexcept
{
return s.end();
}
template <class ElementType, std::size_t Extent>
constexpr typename span<ElementType, Extent>::reverse_iterator
rbegin(const span<ElementType, Extent>& s) noexcept
{
return s.rbegin();
}
template <class ElementType, std::size_t Extent>
constexpr typename span<ElementType, Extent>::reverse_iterator
rend(const span<ElementType, Extent>& s) noexcept
{
return s.rend();
}
template <class ElementType, std::size_t Extent>
constexpr typename span<ElementType, Extent>::iterator
cbegin(const span<ElementType, Extent>& s) noexcept
{
return s.begin();
}
template <class ElementType, std::size_t Extent = dynamic_extent>
constexpr typename span<ElementType, Extent>::iterator
cend(const span<ElementType, Extent>& s) noexcept
{
return s.end();
}
template <class ElementType, std::size_t Extent>
constexpr typename span<ElementType, Extent>::reverse_iterator
crbegin(const span<ElementType, Extent>& s) noexcept
{
return s.rbegin();
}
template <class ElementType, std::size_t Extent>
constexpr typename span<ElementType, Extent>::reverse_iterator
crend(const span<ElementType, Extent>& s) noexcept
{
return s.rend();
}
} // namespace gsl
#endif // GSL_SPAN_EXT_H
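// Illustrative usage sketch (not part of the vendored header): the make_span()
// helpers and the bounds-checked gsl::at() overload provided above.
// span_ext_example() is a hypothetical name used only for this sketch.
#include <gsl/span_ext>
#include <vector>

void span_ext_example()
{
    std::vector<int> vec = {1, 2, 3};
    int arr[] = {1, 2, 3};

    auto a = gsl::make_span(vec);     // span<int>
    auto b = gsl::make_span(arr);     // span<int, 3>
    const bool same = (a == b);       // element-wise comparison from this header
    const int second = gsl::at(a, 1); // fail-fast bounds check
    (void) same; (void) second;
}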

767
deps/GSL/include/gsl/string_span vendored Normal file
View File

@ -0,0 +1,767 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef GSL_STRING_SPAN_H
#define GSL_STRING_SPAN_H
#include <gsl/assert> // for Ensures, Expects
#include <gsl/span_ext> // for operator!=, operator==, dynamic_extent
#include <gsl/util> // for narrow_cast
#include <algorithm> // for equal, lexicographical_compare
#include <array> // for array
#include <cstddef> // for size_t, nullptr_t
#include <cstdint> // for PTRDIFF_MAX
#include <cstring>
#include <string> // for basic_string, allocator, char_traits
#include <type_traits> // for declval, is_convertible, enable_if_t, add_...
#if defined(_MSC_VER) && !defined(__clang__)
#pragma warning(push)
// Turn off MSVC /analyze rules that generate too much noise. TODO: fix in the tool.
#pragma warning(disable : 26446) // TODO: bug in parser - attributes and templates
#pragma warning(disable : 26481) // TODO: suppress does not work inside templates sometimes
#pragma warning(disable : 4996) // use of functions & classes marked [[deprecated]]
#endif // _MSC_VER
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
namespace gsl
{
//
// czstring and wzstring
//
// These are "tag" typedefs for C-style strings (i.e. null-terminated character arrays)
// that allow static analysis to help find bugs.
//
// There are no additional features/semantics that we can find a way to add inside the
// type system for these types that will not either incur significant runtime costs or
// (sometimes needlessly) break existing programs when introduced.
//
template <typename CharT, std::size_t Extent = dynamic_extent>
using basic_zstring = CharT*;
template <std::size_t Extent = dynamic_extent>
using czstring = basic_zstring<const char, Extent>;
template <std::size_t Extent = dynamic_extent>
using cwzstring = basic_zstring<const wchar_t, Extent>;
template <std::size_t Extent = dynamic_extent>
using cu16zstring = basic_zstring<const char16_t, Extent>;
template <std::size_t Extent = dynamic_extent>
using cu32zstring = basic_zstring<const char32_t, Extent>;
template <std::size_t Extent = dynamic_extent>
using zstring = basic_zstring<char, Extent>;
template <std::size_t Extent = dynamic_extent>
using wzstring = basic_zstring<wchar_t, Extent>;
template <std::size_t Extent = dynamic_extent>
using u16zstring = basic_zstring<char16_t, Extent>;
template <std::size_t Extent = dynamic_extent>
using u32zstring = basic_zstring<char32_t, Extent>;
namespace details
{
template <class CharT>
[[deprecated("string_span was removed from the C++ Core Guidelines. For more information, see "
"isocpp/CppCoreGuidelines PR#1680")]] constexpr std::size_t
string_length(const CharT* str, std::size_t n)
{
if (str == nullptr || n == dynamic_extent) return 0;
const span<const CharT> str_span{str, n};
std::size_t len = 0;
while (len < n && str_span[len]) len++;
return len;
}
} // namespace details
//
// ensure_sentinel()
//
// Provides a way to obtain a span from a contiguous sequence
// that ends with a (non-inclusive) sentinel value.
//
// Will fail fast if the sentinel cannot be found before max elements are examined.
//
template <typename T, const T Sentinel>
[[deprecated("string_span was removed from the C++ Core Guidelines. For more information, see "
"isocpp/CppCoreGuidelines PR#1680")]] constexpr span<T, dynamic_extent>
ensure_sentinel(T* seq, std::size_t max = static_cast<std::size_t>(-1))
{
Ensures(seq != nullptr);
// clang-format off
GSL_SUPPRESS(f.23) // TODO: false positive // TODO: suppress does not work
// clang-format on
auto cur = seq;
Ensures(cur != nullptr); // workaround for removing the warning
// clang-format off
GSL_SUPPRESS(bounds.1) // TODO: suppress does not work
// clang-format on
while (static_cast<std::size_t>(cur - seq) < max && *cur != Sentinel) ++cur;
Ensures(*cur == Sentinel);
return {seq, static_cast<std::size_t>(cur - seq)};
}
//
// ensure_z - creates a span for a zero-terminated string. The span will not contain the zero
// terminator. Will fail fast if a null terminator cannot be found before the limit of size_type.
//
template <typename CharT>
[[deprecated("string_span was removed from the C++ Core Guidelines. For more information, see "
"isocpp/CppCoreGuidelines PR#1680")]] constexpr span<CharT, dynamic_extent>
ensure_z(CharT* const& sz, std::size_t max = static_cast<std::size_t>(-1))
{
return ensure_sentinel<CharT, CharT(0)>(sz, max);
}
template <typename CharT, std::size_t N>
constexpr span<CharT, dynamic_extent> ensure_z(CharT (&sz)[N])
{
return ensure_z(&sz[0], N);
}
template <class Cont>
[[deprecated(
"string_span was removed from the C++ Core Guidelines. For more information, see "
"isocpp/CppCoreGuidelines PR#1680")]] constexpr span<typename std::
remove_pointer<
typename Cont::pointer>::type,
dynamic_extent>
ensure_z(Cont& cont)
{
return ensure_z(cont.data(), cont.size());
}
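// Illustrative usage sketch (not part of the vendored header): ensure_z()
// measures a zero-terminated string and returns a span that excludes the
// terminator. These helpers are deprecated along with string_span, so real
// use will trigger deprecation warnings. ensure_z_example() is a hypothetical
// name used only for this sketch.
inline void ensure_z_example()
{
    const char greeting[] = "hello";
    const auto s = ensure_z(greeting); // span<const char> of size 5, no trailing '\0'
    Ensures(s.size() == 5);
}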
template <typename CharT, std::size_t>
class [[deprecated("string_span was removed from the C++ Core Guidelines. For more information, "
"see isocpp/CppCoreGuidelines PR#1680")]] basic_string_span;
namespace details
{
template <typename T>
struct [[deprecated(
"string_span was removed from the C++ Core Guidelines. For more information, "
"see isocpp/CppCoreGuidelines PR#1680")]] is_basic_string_span_oracle : std::false_type{};
template <typename CharT, std::size_t Extent>
struct [[deprecated(
"string_span was removed from the C++ Core Guidelines. For more information, see "
"isocpp/CppCoreGuidelines PR#1680")]] is_basic_string_span_oracle<basic_string_span<CharT,
Extent>>
: std::true_type{};
template <typename T>
struct [[deprecated("string_span was removed from the C++ Core Guidelines. For more "
"information, see isocpp/CppCoreGuidelines PR#1680")]] is_basic_string_span
: is_basic_string_span_oracle<std::remove_cv_t<T>>{};
} // namespace details
//
// string_span and relatives
//
template <typename CharT, std::size_t Extent = dynamic_extent>
class [[deprecated("string_span was removed from the C++ Core Guidelines. For more information, "
"see isocpp/CppCoreGuidelines PR#1680")]] basic_string_span
{
public:
using element_type = CharT;
using value_type = std::remove_cv_t<element_type>;
using pointer = std::add_pointer_t<element_type>;
using reference = std::add_lvalue_reference_t<element_type>;
using const_reference = std::add_lvalue_reference_t<std::add_const_t<element_type>>;
using impl_type = span<element_type, Extent>;
using size_type = typename impl_type::size_type;
using iterator = typename impl_type::iterator;
using reverse_iterator = typename impl_type::reverse_iterator;
// default (empty)
constexpr basic_string_span() noexcept = default;
// copy
constexpr basic_string_span(const basic_string_span& other) noexcept = default;
// assign
constexpr basic_string_span& operator=(const basic_string_span& other) noexcept = default;
constexpr basic_string_span(pointer ptr, size_type length) : span_(ptr, length) {}
constexpr basic_string_span(pointer firstElem, pointer lastElem) : span_(firstElem, lastElem) {}
// From static arrays - if 0-terminated, remove 0 from the view
// All other containers allow 0s within the length, so we do not remove them
template <std::size_t N>
constexpr basic_string_span(element_type(&arr)[N]) : span_(remove_z(arr))
{}
template <std::size_t N, class ArrayElementType = std::remove_const_t<element_type>>
constexpr basic_string_span(std::array<ArrayElementType, N> & arr) noexcept : span_(arr)
{}
template <std::size_t N, class ArrayElementType = std::remove_const_t<element_type>>
constexpr basic_string_span(const std::array<ArrayElementType, N>& arr) noexcept : span_(arr)
{}
// Container signature should work for basic_string after C++17 version exists
template <class Traits, class Allocator>
// GSL_SUPPRESS(bounds.4) // TODO: parser bug
constexpr basic_string_span(std::basic_string<element_type, Traits, Allocator> & str)
: span_(&str[0], str.length())
{}
template <class Traits, class Allocator>
constexpr basic_string_span(const std::basic_string<element_type, Traits, Allocator>& str)
: span_(&str[0], str.length())
{}
// from containers. Containers must have a pointer type and data() function signatures
template <class Container,
class = std::enable_if_t<
!details::is_basic_string_span<Container>::value &&
std::is_convertible<typename Container::pointer, pointer>::value &&
std::is_convertible<typename Container::pointer,
decltype(std::declval<Container>().data())>::value>>
constexpr basic_string_span(Container & cont) : span_(cont)
{}
template <class Container,
class = std::enable_if_t<
!details::is_basic_string_span<Container>::value &&
std::is_convertible<typename Container::pointer, pointer>::value &&
std::is_convertible<typename Container::pointer,
decltype(std::declval<Container>().data())>::value>>
constexpr basic_string_span(const Container& cont) : span_(cont)
{}
// from string_span
template <
class OtherValueType, std::size_t OtherExtent,
class = std::enable_if_t<std::is_convertible<
typename basic_string_span<OtherValueType, OtherExtent>::impl_type, impl_type>::value>>
constexpr basic_string_span(basic_string_span<OtherValueType, OtherExtent> other)
: span_(other.data(), other.length())
{}
template <size_type Count>
constexpr basic_string_span<element_type, Count> first() const
{
return {span_.template first<Count>()};
}
constexpr basic_string_span<element_type, dynamic_extent> first(size_type count) const
{
return {span_.first(count)};
}
template <size_type Count>
constexpr basic_string_span<element_type, Count> last() const
{
return {span_.template last<Count>()};
}
constexpr basic_string_span<element_type, dynamic_extent> last(size_type count) const
{
return {span_.last(count)};
}
template <size_type Offset, size_type Count>
constexpr basic_string_span<element_type, Count> subspan() const
{
return {span_.template subspan<Offset, Count>()};
}
constexpr basic_string_span<element_type, dynamic_extent> subspan(
size_type offset, size_type count = dynamic_extent) const
{
return {span_.subspan(offset, count)};
}
constexpr reference operator[](size_type idx) const { return span_[idx]; }
constexpr reference operator()(size_type idx) const { return span_[idx]; }
constexpr pointer data() const { return span_.data(); }
constexpr size_type length() const noexcept { return span_.size(); }
constexpr size_type size() const noexcept { return span_.size(); }
constexpr size_type size_bytes() const noexcept { return span_.size_bytes(); }
constexpr size_type length_bytes() const noexcept { return span_.length_bytes(); }
constexpr bool empty() const noexcept { return size() == 0; }
constexpr iterator begin() const noexcept { return span_.begin(); }
constexpr iterator end() const noexcept { return span_.end(); }
constexpr reverse_iterator rbegin() const noexcept { return span_.rbegin(); }
constexpr reverse_iterator rend() const noexcept { return span_.rend(); }
private:
static constexpr impl_type remove_z(pointer const& sz, std::size_t max)
{
return impl_type(sz, details::string_length(sz, max));
}
template <std::size_t N>
static constexpr impl_type remove_z(element_type(&sz)[N])
{
return remove_z(&sz[0], N);
}
impl_type span_;
};
template <std::size_t Extent = dynamic_extent>
using string_span [[deprecated("string_span was removed from the C++ Core Guidelines. For more "
"information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_string_span<char, Extent>;
template <std::size_t Extent = dynamic_extent>
using cstring_span [[deprecated("string_span was removed from the C++ Core Guidelines. For more "
"information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_string_span<const char, Extent>;
template <std::size_t Extent = dynamic_extent>
using wstring_span [[deprecated("string_span was removed from the C++ Core Guidelines. For more "
"information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_string_span<wchar_t, Extent>;
template <std::size_t Extent = dynamic_extent>
using cwstring_span [[deprecated("string_span was removed from the C++ Core Guidelines. For more "
"information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_string_span<const wchar_t, Extent>;
template <std::size_t Extent = dynamic_extent>
using u16string_span [[deprecated("string_span was removed from the C++ Core Guidelines. For more "
"information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_string_span<char16_t, Extent>;
template <std::size_t Extent = dynamic_extent>
using cu16string_span [[deprecated("string_span was removed from the C++ Core Guidelines. For more "
"information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_string_span<const char16_t, Extent>;
template <std::size_t Extent = dynamic_extent>
using u32string_span [[deprecated("string_span was removed from the C++ Core Guidelines. For more "
"information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_string_span<char32_t, Extent>;
template <std::size_t Extent = dynamic_extent>
using cu32string_span [[deprecated("string_span was removed from the C++ Core Guidelines. For more "
"information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_string_span<const char32_t, Extent>;
//
// to_string() allows (explicit) conversions from string_span to string
//
template <typename CharT, std::size_t Extent>
constexpr std::basic_string<typename std::remove_const<CharT>::type>
to_string(basic_string_span<CharT, Extent> view)
{
return {view.data(), narrow_cast<std::size_t>(view.length())};
}
template <typename CharT, typename Traits = typename std::char_traits<CharT>,
typename Allocator = std::allocator<CharT>, typename gCharT, std::size_t Extent>
constexpr std::basic_string<CharT, Traits, Allocator>
to_basic_string(basic_string_span<gCharT, Extent> view)
{
return {view.data(), narrow_cast<std::size_t>(view.length())};
}
template <class ElementType, std::size_t Extent>
constexpr basic_string_span<const byte, details::calculate_byte_size<ElementType, Extent>::value>
as_bytes(basic_string_span<ElementType, Extent> s) noexcept
{
// clang-format off
GSL_SUPPRESS(type.1)
// clang-format on
return {reinterpret_cast<const byte*>(s.data()), s.size_bytes()};
}
template <class ElementType, std::size_t Extent,
class = std::enable_if_t<!std::is_const<ElementType>::value>>
constexpr basic_string_span<byte, details::calculate_byte_size<ElementType, Extent>::value>
as_writable_bytes(basic_string_span<ElementType, Extent> s) noexcept
{
// clang-format off
GSL_SUPPRESS(type.1)
// clang-format on
return {reinterpret_cast<byte*>(s.data()), s.size_bytes()};
}
// zero-terminated string span, used to convert
// zero-terminated spans to legacy strings
template <typename CharT, std::size_t Extent = dynamic_extent>
class [[deprecated("string_span was removed from the C++ Core Guidelines. For more information, "
"see isocpp/CppCoreGuidelines PR#1680")]] basic_zstring_span
{
public:
using value_type = CharT;
using const_value_type = std::add_const_t<CharT>;
using pointer = std::add_pointer_t<value_type>;
using const_pointer = std::add_pointer_t<const_value_type>;
using zstring_type = basic_zstring<value_type, Extent>;
using const_zstring_type = basic_zstring<const_value_type, Extent>;
using impl_type = span<value_type, Extent>;
using string_span_type = basic_string_span<value_type, Extent>;
constexpr basic_zstring_span(impl_type s) : span_(s)
{
// expects a zero-terminated span
Expects(s.size() > 0);
Expects(s[s.size() - 1] == value_type{});
}
// copy
constexpr basic_zstring_span(const basic_zstring_span& other) = default;
// move
constexpr basic_zstring_span(basic_zstring_span && other) = default;
// assign
constexpr basic_zstring_span& operator=(const basic_zstring_span& other) = default;
// move assign
constexpr basic_zstring_span& operator=(basic_zstring_span&& other) = default;
constexpr bool empty() const noexcept { return false; }
constexpr string_span_type as_string_span() const noexcept
{
return {span_.data(), span_.size() - 1};
}
constexpr string_span_type ensure_z() const { return gsl::ensure_z(span_); }
constexpr const_zstring_type assume_z() const noexcept { return span_.data(); }
private:
impl_type span_;
};
template <std::size_t Max = dynamic_extent>
using zstring_span [[deprecated("string_span was removed from the C++ Core Guidelines. For more "
"information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_zstring_span<char, Max>;
template <std::size_t Max = dynamic_extent>
using wzstring_span [[deprecated("string_span was removed from the C++ Core Guidelines. For more "
"information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_zstring_span<wchar_t, Max>;
template <std::size_t Max = dynamic_extent>
using u16zstring_span [[deprecated("string_span was removed from the C++ Core Guidelines. For more "
"information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_zstring_span<char16_t, Max>;
template <std::size_t Max = dynamic_extent>
using u32zstring_span [[deprecated("string_span was removed from the C++ Core Guidelines. For more "
"information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_zstring_span<char32_t, Max>;
template <std::size_t Max = dynamic_extent>
using czstring_span [[deprecated("string_span was removed from the C++ Core Guidelines. For more "
"information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_zstring_span<const char, Max>;
template <std::size_t Max = dynamic_extent>
using cwzstring_span [[deprecated("string_span was removed from the C++ Core Guidelines. For more "
"information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_zstring_span<const wchar_t, Max>;
template <std::size_t Max = dynamic_extent>
using cu16zstring_span [[deprecated("string_span was removed from the C++ Core Guidelines. For "
"more information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_zstring_span<const char16_t, Max>;
template <std::size_t Max = dynamic_extent>
using cu32zstring_span [[deprecated("string_span was removed from the C++ Core Guidelines. For "
"more information, see isocpp/CppCoreGuidelines PR#1680")]] =
basic_zstring_span<const char32_t, Max>;
// operator ==
template <class CharT, std::size_t Extent, class T,
class = std::enable_if_t<
details::is_basic_string_span<T>::value ||
std::is_convertible<T, gsl::basic_string_span<std::add_const_t<CharT>>>::value>>
bool operator==(const gsl::basic_string_span<CharT, Extent>& one, const T& other)
{
const gsl::basic_string_span<std::add_const_t<CharT>> tmp(other);
return std::equal(one.begin(), one.end(), tmp.begin(), tmp.end());
}
template <class CharT, std::size_t Extent, class T,
class = std::enable_if_t<
!details::is_basic_string_span<T>::value &&
std::is_convertible<T, gsl::basic_string_span<std::add_const_t<CharT>>>::value>>
bool operator==(const T& one, const gsl::basic_string_span<CharT, Extent>& other)
{
const gsl::basic_string_span<std::add_const_t<CharT>> tmp(one);
return std::equal(tmp.begin(), tmp.end(), other.begin(), other.end());
}
// operator !=
template <typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename = std::enable_if_t<std::is_convertible<
T, gsl::basic_string_span<std::add_const_t<CharT>, Extent>>::value>>
bool operator!=(gsl::basic_string_span<CharT, Extent> one, const T& other)
{
return !(one == other);
}
template <
typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename = std::enable_if_t<
std::is_convertible<T, gsl::basic_string_span<std::add_const_t<CharT>, Extent>>::value &&
!gsl::details::is_basic_string_span<T>::value>>
bool operator!=(const T& one, gsl::basic_string_span<CharT, Extent> other)
{
return !(one == other);
}
// operator<
template <typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename = std::enable_if_t<std::is_convertible<
T, gsl::basic_string_span<std::add_const_t<CharT>, Extent>>::value>>
bool operator<(gsl::basic_string_span<CharT, Extent> one, const T& other)
{
const gsl::basic_string_span<std::add_const_t<CharT>, Extent> tmp(other);
return std::lexicographical_compare(one.begin(), one.end(), tmp.begin(), tmp.end());
}
template <
typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename = std::enable_if_t<
std::is_convertible<T, gsl::basic_string_span<std::add_const_t<CharT>, Extent>>::value &&
!gsl::details::is_basic_string_span<T>::value>>
bool operator<(const T& one, gsl::basic_string_span<CharT, Extent> other)
{
gsl::basic_string_span<std::add_const_t<CharT>, Extent> tmp(one);
return std::lexicographical_compare(tmp.begin(), tmp.end(), other.begin(), other.end());
}
#ifndef _MSC_VER
// VS treats temp and const containers as convertible to basic_string_span,
// so the cases below are already covered by the previous operators
template <
typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename DataType = typename T::value_type,
typename = std::enable_if_t<
!gsl::details::is_span<T>::value && !gsl::details::is_basic_string_span<T>::value &&
std::is_convertible<DataType*, CharT*>::value &&
std::is_same<std::decay_t<decltype(std::declval<T>().size(), *std::declval<T>().data())>,
DataType>::value>>
bool operator<(gsl::basic_string_span<CharT, Extent> one, const T& other)
{
gsl::basic_string_span<std::add_const_t<CharT>, Extent> tmp(other);
return std::lexicographical_compare(one.begin(), one.end(), tmp.begin(), tmp.end());
}
template <
typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename DataType = typename T::value_type,
typename = std::enable_if_t<
!gsl::details::is_span<T>::value && !gsl::details::is_basic_string_span<T>::value &&
std::is_convertible<DataType*, CharT*>::value &&
std::is_same<std::decay_t<decltype(std::declval<T>().size(), *std::declval<T>().data())>,
DataType>::value>>
bool operator<(const T& one, gsl::basic_string_span<CharT, Extent> other)
{
gsl::basic_string_span<std::add_const_t<CharT>, Extent> tmp(one);
return std::lexicographical_compare(tmp.begin(), tmp.end(), other.begin(), other.end());
}
#endif
// operator <=
template <typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename = std::enable_if_t<std::is_convertible<
T, gsl::basic_string_span<std::add_const_t<CharT>, Extent>>::value>>
bool operator<=(gsl::basic_string_span<CharT, Extent> one, const T& other)
{
return !(other < one);
}
template <
typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename = std::enable_if_t<
std::is_convertible<T, gsl::basic_string_span<std::add_const_t<CharT>, Extent>>::value &&
!gsl::details::is_basic_string_span<T>::value>>
bool operator<=(const T& one, gsl::basic_string_span<CharT, Extent> other)
{
return !(other < one);
}
#ifndef _MSC_VER
// VS treats temp and const containers as convertible to basic_string_span,
// so the cases below are already covered by the previous operators
template <
typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename DataType = typename T::value_type,
typename = std::enable_if_t<
!gsl::details::is_span<T>::value && !gsl::details::is_basic_string_span<T>::value &&
std::is_convertible<DataType*, CharT*>::value &&
std::is_same<std::decay_t<decltype(std::declval<T>().size(), *std::declval<T>().data())>,
DataType>::value>>
bool operator<=(gsl::basic_string_span<CharT, Extent> one, const T& other)
{
return !(other < one);
}
template <
typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename DataType = typename T::value_type,
typename = std::enable_if_t<
!gsl::details::is_span<T>::value && !gsl::details::is_basic_string_span<T>::value &&
std::is_convertible<DataType*, CharT*>::value &&
std::is_same<std::decay_t<decltype(std::declval<T>().size(), *std::declval<T>().data())>,
DataType>::value>>
bool operator<=(const T& one, gsl::basic_string_span<CharT, Extent> other)
{
return !(other < one);
}
#endif
// operator>
template <typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename = std::enable_if_t<std::is_convertible<
T, gsl::basic_string_span<std::add_const_t<CharT>, Extent>>::value>>
bool operator>(gsl::basic_string_span<CharT, Extent> one, const T& other)
{
return other < one;
}
template <
typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename = std::enable_if_t<
std::is_convertible<T, gsl::basic_string_span<std::add_const_t<CharT>, Extent>>::value &&
!gsl::details::is_basic_string_span<T>::value>>
bool operator>(const T& one, gsl::basic_string_span<CharT, Extent> other)
{
return other < one;
}
#ifndef _MSC_VER
// VS treats temp and const containers as convertible to basic_string_span,
// so the cases below are already covered by the previous operators
template <
typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename DataType = typename T::value_type,
typename = std::enable_if_t<
!gsl::details::is_span<T>::value && !gsl::details::is_basic_string_span<T>::value &&
std::is_convertible<DataType*, CharT*>::value &&
std::is_same<std::decay_t<decltype(std::declval<T>().size(), *std::declval<T>().data())>,
DataType>::value>>
bool operator>(gsl::basic_string_span<CharT, Extent> one, const T& other)
{
return other < one;
}
template <
typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename DataType = typename T::value_type,
typename = std::enable_if_t<
!gsl::details::is_span<T>::value && !gsl::details::is_basic_string_span<T>::value &&
std::is_convertible<DataType*, CharT*>::value &&
std::is_same<std::decay_t<decltype(std::declval<T>().size(), *std::declval<T>().data())>,
DataType>::value>>
bool operator>(const T& one, gsl::basic_string_span<CharT, Extent> other)
{
return other < one;
}
#endif
// operator >=
template <typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename = std::enable_if_t<std::is_convertible<
T, gsl::basic_string_span<std::add_const_t<CharT>, Extent>>::value>>
bool operator>=(gsl::basic_string_span<CharT, Extent> one, const T& other)
{
return !(one < other);
}
template <
typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename = std::enable_if_t<
std::is_convertible<T, gsl::basic_string_span<std::add_const_t<CharT>, Extent>>::value &&
!gsl::details::is_basic_string_span<T>::value>>
bool operator>=(const T& one, gsl::basic_string_span<CharT, Extent> other)
{
return !(one < other);
}
#ifndef _MSC_VER
// VS treats temp and const containers as convertible to basic_string_span,
// so the cases below are already covered by the previous operators
template <
typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename DataType = typename T::value_type,
typename = std::enable_if_t<
!gsl::details::is_span<T>::value && !gsl::details::is_basic_string_span<T>::value &&
std::is_convertible<DataType*, CharT*>::value &&
std::is_same<std::decay_t<decltype(std::declval<T>().size(), *std::declval<T>().data())>,
DataType>::value>>
bool operator>=(gsl::basic_string_span<CharT, Extent> one, const T& other)
{
return !(one < other);
}
template <
typename CharT, std::size_t Extent = dynamic_extent, typename T,
typename DataType = typename T::value_type,
typename = std::enable_if_t<
!gsl::details::is_span<T>::value && !gsl::details::is_basic_string_span<T>::value &&
std::is_convertible<DataType*, CharT*>::value &&
std::is_same<std::decay_t<decltype(std::declval<T>().size(), *std::declval<T>().data())>,
DataType>::value>>
bool operator>=(const T& one, gsl::basic_string_span<CharT, Extent> other)
{
return !(one < other);
}
#endif
} // namespace gsl
#if defined(_MSC_VER) && !defined(__clang__)
#pragma warning(pop)
#endif // _MSC_VER
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
#endif // GSL_STRING_SPAN_H
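The string_span facilities above are marked deprecated but are still vendored by this commit, so a short illustration of how the pieces fit together may help. This is a usage sketch only, not part of any file in the commit; it assumes the vendored GSL headers are on the include path and that the [[deprecated]] warnings are acceptable. ensure_z() is declared earlier in this header (outside the excerpt shown), and buffer/copied are names invented for the example.
#include <gsl/span>
#include <gsl/string_span>
#include <iostream>
#include <string>
int main()
{
    char buffer[] = "hello";                        // array includes the trailing '\0'
    gsl::string_span<> s = gsl::ensure_z(buffer);   // view over "hello", terminator excluded
    gsl::cstring_span<> cs = s;                     // non-const to const view conversion
    std::string copied = gsl::to_string(cs);        // explicit copy back into std::string
    std::cout << std::boolalpha
              << (cs == s) << ' '                   // operator== defined above: true
              << copied << '\n';                    // prints: hello
    gsl::span<char> whole{buffer};                  // all six chars, terminator included
    gsl::zstring_span<> zs(whole);                  // constructor Expects() the terminator
    std::cout << zs.assume_z() << '\n';             // zero-terminated pointer for C APIs
}
to_string() performs the explicit copy back into std::string, while assume_z() hands the underlying zero-terminated pointer to legacy C interfaces without copying.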

145
deps/GSL/include/gsl/util vendored Normal file
View File

@ -0,0 +1,145 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef GSL_UTIL_H
#define GSL_UTIL_H
#include <gsl/assert> // for Expects
#include <array>
#include <cstddef> // for ptrdiff_t, size_t
#include <initializer_list> // for initializer_list
#include <type_traits> // for is_signed, integral_constant
#include <utility> // for exchange, forward
#if defined(_MSC_VER) && !defined(__clang__)
#pragma warning(push)
#pragma warning(disable : 4127) // conditional expression is constant
#endif // _MSC_VER
#if defined(__cplusplus) && (__cplusplus >= 201703L)
#define GSL_NODISCARD [[nodiscard]]
#else
#define GSL_NODISCARD
#endif // defined(__cplusplus) && (__cplusplus >= 201703L)
namespace gsl
{
//
// GSL.util: utilities
//
// index type for all container indexes/subscripts/sizes
using index = std::ptrdiff_t;
// final_action allows you to ensure something gets run at the end of a scope
template <class F>
class final_action
{
public:
static_assert(!std::is_reference<F>::value && !std::is_const<F>::value &&
!std::is_volatile<F>::value,
"Final_action should store its callable by value");
explicit final_action(F f) noexcept : f_(std::move(f)) {}
final_action(final_action&& other) noexcept
: f_(std::move(other.f_)), invoke_(std::exchange(other.invoke_, false))
{}
final_action(const final_action&) = delete;
final_action& operator=(const final_action&) = delete;
final_action& operator=(final_action&&) = delete;
// clang-format off
GSL_SUPPRESS(f.6) // NO-FORMAT: attribute // terminate if throws
// clang-format on
~final_action() noexcept
{
if (invoke_) f_();
}
private:
F f_;
bool invoke_{true};
};
// finally() - convenience function to generate a final_action
template <class F>
GSL_NODISCARD final_action<typename std::remove_cv<typename std::remove_reference<F>::type>::type>
finally(F&& f) noexcept
{
return final_action<typename std::remove_cv<typename std::remove_reference<F>::type>::type>(
std::forward<F>(f));
}
// narrow_cast(): a searchable way to do narrowing casts of values
template <class T, class U>
// clang-format off
GSL_SUPPRESS(type.1) // NO-FORMAT: attribute
// clang-format on
constexpr T narrow_cast(U&& u) noexcept
{
return static_cast<T>(std::forward<U>(u));
}
//
// at() - Bounds-checked way of accessing builtin arrays, std::array, std::vector
//
template <class T, std::size_t N>
// clang-format off
GSL_SUPPRESS(bounds.4) // NO-FORMAT: attribute
GSL_SUPPRESS(bounds.2) // NO-FORMAT: attribute
// clang-format on
constexpr T& at(T (&arr)[N], const index i)
{
Expects(i >= 0 && i < narrow_cast<index>(N));
return arr[narrow_cast<std::size_t>(i)];
}
template <class Cont>
// clang-format off
GSL_SUPPRESS(bounds.4) // NO-FORMAT: attribute
GSL_SUPPRESS(bounds.2) // NO-FORMAT: attribute
// clang-format on
constexpr auto at(Cont& cont, const index i) -> decltype(cont[cont.size()])
{
Expects(i >= 0 && i < narrow_cast<index>(cont.size()));
using size_type = decltype(cont.size());
return cont[narrow_cast<size_type>(i)];
}
template <class T>
// clang-format off
GSL_SUPPRESS(bounds.1) // NO-FORMAT: attribute
// clang-format on
constexpr T at(const std::initializer_list<T> cont, const index i)
{
Expects(i >= 0 && i < narrow_cast<index>(cont.size()));
return *(cont.begin() + i);
}
} // namespace gsl
#if defined(_MSC_VER) && !defined(__clang__)
#pragma warning(pop)
#endif // _MSC_VER
#endif // GSL_UTIL_H
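For readers unfamiliar with the utilities <gsl/util> declares above (index, final_action/finally, narrow_cast, at), here is a usage sketch only, not part of the commit. It assumes the vendored GSL headers are on the include path; "example.txt" and the variable names are placeholders invented for the example.
#include <gsl/util>
#include <cstdio>
#include <vector>
int main()
{
    std::FILE* f = std::fopen("example.txt", "w");     // placeholder file name
    auto close_file = gsl::finally([&] {               // final_action runs at scope exit
        if (f) std::fclose(f);
    });
    const long long big = 1234;
    const int small = gsl::narrow_cast<int>(big);      // explicit, searchable narrowing
    const std::vector<int> v{1, 2, 3};
    for (gsl::index i = 0; i < gsl::narrow_cast<gsl::index>(v.size()); ++i)
        std::printf("%d %d\n", small, gsl::at(v, i));  // at() enforces bounds via Expects()
    return 0;
}
narrow_cast() only documents intent and performs no range check, which is why at() pairs the subscript with an Expects() precondition instead.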

26
deps/GSL/pipelines/jobs.yml vendored Normal file
View File

@ -0,0 +1,26 @@
parameters:
jobName: ''
imageName: ''
jobs:
- job:
displayName: ${{ parameters.imageName }}
pool:
vmImage: ${{ parameters.imageName }}
strategy:
matrix:
14_debug:
GSL_CXX_STANDARD: '14'
BUILD_TYPE: 'Debug'
14_release:
GSL_CXX_STANDARD: '14'
BUILD_TYPE: 'Release'
17_debug:
GSL_CXX_STANDARD: '17'
BUILD_TYPE: 'Debug'
17_release:
GSL_CXX_STANDARD: '17'
BUILD_TYPE: 'Release'
continueOnError: false
steps:
- template: ./steps.yml

17
deps/GSL/pipelines/steps.yml vendored Normal file
View File

@ -0,0 +1,17 @@
steps:
- task: CMake@1
name: Configure
inputs:
workingDirectory: build
cmakeArgs: '-DCMAKE_CXX_STANDARD=$(GSL_CXX_STANDARD) -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) .. '
- task: CMake@1
name: Build
inputs:
workingDirectory: build
cmakeArgs: '--build . '
- script: ctest . --output-on-failure --no-compress-output
name: CTest
workingDirectory: build
failOnStderr: true

273
deps/GSL/tests/CMakeLists.txt vendored Normal file
View File

@ -0,0 +1,273 @@
cmake_minimum_required(VERSION 3.0.2)
project(GSLTests CXX)
include(FindPkgConfig)
include(ExternalProject)
find_package(Git REQUIRED QUIET)
# will make visual studio generated project group files
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
pkg_search_module(GTestMain gtest_main)
if (NOT GTestMain_FOUND)
configure_file(CMakeLists.txt.in googletest-download/CMakeLists.txt)
execute_process(
COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" .
RESULT_VARIABLE result
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download
)
if(result)
message(FATAL_ERROR "CMake step for googletest failed: ${result}")
endif()
execute_process(
COMMAND ${CMAKE_COMMAND} --build .
RESULT_VARIABLE result
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download
)
if(result)
message(FATAL_ERROR "CMake step for googletest failed: ${result}")
endif()
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
set(GTestMain_LIBRARIES gtest_main)
add_subdirectory(
${CMAKE_CURRENT_BINARY_DIR}/googletest-src
${CMAKE_CURRENT_BINARY_DIR}/googletest-build
EXCLUDE_FROM_ALL
)
endif()
if (MSVC AND (GSL_CXX_STANDARD EQUAL 17))
set(GSL_CPLUSPLUS_OPT -Zc:__cplusplus -permissive-)
endif()
# this interface adds compile options to how the tests are run
# please try to keep entries ordered =)
add_library(gsl_tests_config INTERFACE)
if(MSVC) # MSVC or simulating MSVC
target_compile_options(gsl_tests_config INTERFACE
${GSL_CPLUSPLUS_OPT}
/EHsc
/W4
/WX
$<$<CXX_COMPILER_ID:MSVC>:
/wd4996 # Use of function or classes marked [[deprecated]]
/wd26409 # CppCoreCheck - GTest
/wd26426 # CppCoreCheck - GTest
/wd26440 # CppCoreCheck - GTest
/wd26446 # CppCoreCheck - prefer gsl::at()
/wd26472 # CppCoreCheck - use gsl::narrow(_cast)
/wd26481 # CppCoreCheck - use span instead of pointer arithmetic
$<$<VERSION_LESS:$<CXX_COMPILER_VERSION>,1920>: # VS2015
/wd4189 # variable is initialized but not referenced
$<$<NOT:$<CONFIG:Debug>>: # Release, RelWithDebInfo
/wd4702 # Unreachable code
>
>
>
$<$<CXX_COMPILER_ID:Clang>:
-Weverything
-Wno-c++98-compat
-Wno-c++98-compat-pedantic
-Wno-covered-switch-default # GTest
-Wno-deprecated-declarations # Allow tests for [[deprecated]] elements
-Wno-global-constructors # GTest
-Wno-language-extension-token # GTest gtest-port.h
-Wno-missing-braces
-Wno-missing-prototypes
-Wno-shift-sign-overflow # GTest gtest-port.h
-Wno-undef # GTest
-Wno-used-but-marked-unused # GTest EXPECT_DEATH
$<$<EQUAL:${GSL_CXX_STANDARD},14>: # no support for [[maybe_unused]]
-Wno-unused-member-function
-Wno-unused-variable
>
>
)
else()
target_compile_options(gsl_tests_config INTERFACE
-fno-strict-aliasing
-Wall
-Wcast-align
-Wconversion
-Wctor-dtor-privacy
-Werror
-Wextra
-Wpedantic
-Wshadow
-Wsign-conversion
-Wno-deprecated-declarations # Allow tests for [[deprecated]] elements
$<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>>:
-Weverything
-Wno-c++98-compat
-Wno-c++98-compat-pedantic
-Wno-missing-braces
-Wno-covered-switch-default # GTest
-Wno-global-constructors # GTest
-Wno-missing-prototypes
-Wno-padded
-Wno-unknown-attributes
-Wno-used-but-marked-unused # GTest EXPECT_DEATH
-Wno-weak-vtables
$<$<EQUAL:${GSL_CXX_STANDARD},14>: # no support for [[maybe_unused]]
-Wno-unused-member-function
-Wno-unused-variable
>
>
$<$<CXX_COMPILER_ID:Clang>:
$<$<AND:$<VERSION_GREATER:$<CXX_COMPILER_VERSION>,4.99>,$<VERSION_LESS:$<CXX_COMPILER_VERSION>,6>>:
$<$<EQUAL:${GSL_CXX_STANDARD},17>:-Wno-undefined-func-template>
>
>
$<$<CXX_COMPILER_ID:AppleClang>:
$<$<AND:$<VERSION_GREATER:$<CXX_COMPILER_VERSION>,9.1>,$<VERSION_LESS:$<CXX_COMPILER_VERSION>,10>>:
$<$<EQUAL:${GSL_CXX_STANDARD},17>:-Wno-undefined-func-template>
>
>
$<$<CXX_COMPILER_ID:GNU>:
-Wdouble-promotion # float implicit to double
-Wlogical-op # suspicious uses of logical operators
$<$<NOT:$<VERSION_LESS:$<CXX_COMPILER_VERSION>,6>>:
-Wduplicated-cond # duplicated if-else conditions
-Wmisleading-indentation
-Wnull-dereference
$<$<EQUAL:${GSL_CXX_STANDARD},14>: # no support for [[maybe_unused]]
-Wno-unused-variable
>
>
$<$<NOT:$<VERSION_LESS:$<CXX_COMPILER_VERSION>,7>>:
-Wduplicated-branches # identical if-else branches
>
>
)
endif(MSVC)
# for tests to find the gtest header
target_include_directories(gsl_tests_config SYSTEM INTERFACE
googletest/googletest/include
)
set_property(TARGET PROPERTY FOLDER "GSL_tests")
function(add_gsl_test name)
add_executable(${name} ${name}.cpp)
target_link_libraries(${name}
GSL
gsl_tests_config
${GTestMain_LIBRARIES}
)
add_test(
${name}
${name}
)
# group all tests under GSL_tests
set_property(TARGET ${name} PROPERTY FOLDER "GSL_tests")
endfunction()
add_gsl_test(span_tests)
add_gsl_test(span_ext_tests)
add_gsl_test(span_compatibility_tests)
add_gsl_test(string_span_tests)
add_gsl_test(at_tests)
add_gsl_test(notnull_tests)
add_gsl_test(assertion_tests)
add_gsl_test(utils_tests)
add_gsl_test(owner_tests)
add_gsl_test(byte_tests)
add_gsl_test(algorithm_tests)
add_gsl_test(strict_notnull_tests)
# No exception tests
foreach(flag_var
CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
STRING (REGEX REPLACE "/EHsc" "" ${flag_var} "${${flag_var}}")
endforeach(flag_var)
# this interface adds compile options to how the tests are run
# please try to keep entries ordered =)
add_library(gsl_tests_config_noexcept INTERFACE)
if(MSVC) # MSVC or simulating MSVC
target_compile_definitions(gsl_tests_config_noexcept INTERFACE
_HAS_EXCEPTIONS=0 # disable exceptions in the Microsoft STL
)
target_compile_options(gsl_tests_config_noexcept INTERFACE
${GSL_CPLUSPLUS_OPT}
/W4
/WX
$<$<CXX_COMPILER_ID:MSVC>:
/wd4577
/wd4702
/wd26440 # CppCoreCheck - GTest
/wd26446 # CppCoreCheck - prefer gsl::at()
>
$<$<CXX_COMPILER_ID:Clang>:
-Weverything
-Wno-c++98-compat
-Wno-c++98-compat-pedantic
-Wno-missing-prototypes
-Wno-unknown-attributes
>
)
else()
target_compile_options(gsl_tests_config_noexcept INTERFACE
-fno-exceptions
-fno-strict-aliasing
-Wall
-Wcast-align
-Wconversion
-Wctor-dtor-privacy
-Werror
-Wextra
-Wpedantic
-Wshadow
-Wsign-conversion
$<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>>:
-Weverything
-Wno-c++98-compat
-Wno-c++98-compat-pedantic
-Wno-missing-prototypes
-Wno-unknown-attributes
-Wno-weak-vtables
>
$<$<CXX_COMPILER_ID:GNU>:
-Wdouble-promotion # float implicit to double
-Wlogical-op # suspicious uses of logical operators
-Wuseless-cast # casting to its own type
$<$<NOT:$<VERSION_LESS:$<CXX_COMPILER_VERSION>,6>>:
-Wduplicated-cond # duplicated if-else conditions
-Wmisleading-indentation
-Wnull-dereference
>
$<$<NOT:$<VERSION_LESS:$<CXX_COMPILER_VERSION>,7>>:
-Wduplicated-branches # identical if-else branches
>
$<$<NOT:$<VERSION_LESS:$<CXX_COMPILER_VERSION>,8>>:
-Wcast-align=strict # increase alignment (i.e. char* to int*)
>
>
)
endif(MSVC)
function(add_gsl_test_noexcept name)
add_executable(${name} ${name}.cpp)
target_link_libraries(${name}
GSL
gsl_tests_config_noexcept
${GTestMain_LIBRARIES}
)
add_test(
${name}
${name}
)
# group all tests under GSL_tests_noexcept
set_property(TARGET ${name} PROPERTY FOLDER "GSL_tests_noexcept")
endfunction()
add_gsl_test_noexcept(no_exception_ensure_tests)

14
deps/GSL/tests/CMakeLists.txt.in vendored Normal file
View File

@ -0,0 +1,14 @@
cmake_minimum_required(VERSION 3.0.2)
project(googletest-download NONE)
include(ExternalProject)
ExternalProject_Add(googletest
GIT_REPOSITORY https://github.com/google/googletest.git
GIT_TAG 703bd9caab50b139428cea1aaff9974ebee5742e
SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/googletest-src"
BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/googletest-build"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)

227
deps/GSL/tests/algorithm_tests.cpp vendored Normal file
View File

@ -0,0 +1,227 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <gsl/algorithm> // for copy
#include <gsl/span> // for span
#include <array> // for array
#include <cstddef> // for size_t
namespace
{
static constexpr char deathstring[] = "Expected Death";
}
namespace gsl
{
struct fail_fast;
} // namespace gsl
using namespace std;
using namespace gsl;
TEST(algorithm_tests, same_type)
{
// dynamic source and destination span
{
std::array<int, 5> src{1, 2, 3, 4, 5};
std::array<int, 10> dst{};
const span<int> src_span(src);
const span<int> dst_span(dst);
copy(src_span, dst_span);
copy(src_span, dst_span.subspan(src_span.size()));
for (std::size_t i = 0; i < src.size(); ++i)
{
EXPECT_TRUE(dst[i] == src[i]);
EXPECT_TRUE(dst[i + src.size()] == src[i]);
}
}
// static source and dynamic destination span
{
std::array<int, 5> src{1, 2, 3, 4, 5};
std::array<int, 10> dst{};
const span<int, 5> src_span(src);
const span<int> dst_span(dst);
copy(src_span, dst_span);
copy(src_span, dst_span.subspan(src_span.size()));
for (std::size_t i = 0; i < src.size(); ++i)
{
EXPECT_TRUE(dst[i] == src[i]);
EXPECT_TRUE(dst[i + src.size()] == src[i]);
}
}
// dynamic source and static destination span
{
std::array<int, 5> src{1, 2, 3, 4, 5};
std::array<int, 10> dst{};
const span<int> src_span(src);
const span<int, 10> dst_span(dst);
copy(src_span, dst_span);
copy(src_span, dst_span.subspan(src_span.size()));
for (std::size_t i = 0; i < src.size(); ++i)
{
EXPECT_TRUE(dst[i] == src[i]);
EXPECT_TRUE(dst[i + src.size()] == src[i]);
}
}
// static source and destination span
{
std::array<int, 5> src{1, 2, 3, 4, 5};
std::array<int, 10> dst{};
const span<int, 5> src_span(src);
const span<int, 10> dst_span(dst);
copy(src_span, dst_span);
copy(src_span, dst_span.subspan(src_span.size()));
for (std::size_t i = 0; i < src.size(); ++i)
{
EXPECT_TRUE(dst[i] == src[i]);
EXPECT_TRUE(dst[i + src.size()] == src[i]);
}
}
}
TEST(algorithm_tests, compatible_type)
{
// dynamic source and destination span
{
std::array<short, 5> src{1, 2, 3, 4, 5};
std::array<int, 10> dst{};
const span<short> src_span(src);
const span<int> dst_span(dst);
copy(src_span, dst_span);
copy(src_span, dst_span.subspan(src_span.size()));
for (std::size_t i = 0; i < src.size(); ++i)
{
EXPECT_TRUE(dst[i] == src[i]);
EXPECT_TRUE(dst[i + src.size()] == src[i]);
}
}
// static source and dynamic destination span
{
std::array<short, 5> src{1, 2, 3, 4, 5};
std::array<int, 10> dst{};
const span<short, 5> src_span(src);
const span<int> dst_span(dst);
copy(src_span, dst_span);
copy(src_span, dst_span.subspan(src_span.size()));
for (std::size_t i = 0; i < src.size(); ++i)
{
EXPECT_TRUE(dst[i] == src[i]);
EXPECT_TRUE(dst[i + src.size()] == src[i]);
}
}
// dynamic source and static destination span
{
std::array<short, 5> src{1, 2, 3, 4, 5};
std::array<int, 10> dst{};
const span<short> src_span(src);
const span<int, 10> dst_span(dst);
copy(src_span, dst_span);
copy(src_span, dst_span.subspan(src_span.size()));
for (std::size_t i = 0; i < src.size(); ++i)
{
EXPECT_TRUE(dst[i] == src[i]);
EXPECT_TRUE(dst[i + src.size()] == src[i]);
}
}
// static source and destination span
{
std::array<short, 5> src{1, 2, 3, 4, 5};
std::array<int, 10> dst{};
const span<short, 5> src_span(src);
const span<int, 10> dst_span(dst);
copy(src_span, dst_span);
copy(src_span, dst_span.subspan(src_span.size()));
for (std::size_t i = 0; i < src.size(); ++i)
{
EXPECT_TRUE(dst[i] == src[i]);
EXPECT_TRUE(dst[i + src.size()] == src[i]);
}
}
}
#ifdef CONFIRM_COMPILATION_ERRORS
TEST(algorithm_tests, incompatible_type)
{
std::array<int, 4> src{1, 2, 3, 4};
std::array<int*, 12> dst{};
span<int> src_span_dyn(src);
span<int, 4> src_span_static(src);
span<int*> dst_span_dyn(dst);
span<int*, 4> dst_span_static(dst);
// every line should produce a compilation error
copy(src_span_dyn, dst_span_dyn);
copy(src_span_dyn, dst_span_static);
copy(src_span_static, dst_span_dyn);
copy(src_span_static, dst_span_static);
}
#endif
TEST(algorithm_tests, small_destination_span)
{
std::set_terminate([] {
std::cerr << "Expected Death. small_destination_span";
std::abort();
});
std::array<int, 12> src{1, 2, 3, 4};
std::array<int, 4> dst{};
const span<int> src_span_dyn(src);
const span<int, 12> src_span_static(src);
const span<int> dst_span_dyn(dst);
const span<int, 4> dst_span_static(dst);
EXPECT_DEATH(copy(src_span_dyn, dst_span_dyn), deathstring);
EXPECT_DEATH(copy(src_span_dyn, dst_span_static), deathstring);
EXPECT_DEATH(copy(src_span_static, dst_span_dyn), deathstring);
#ifdef CONFIRM_COMPILATION_ERRORS
copy(src_span_static, dst_span_static);
#endif
}

61
deps/GSL/tests/assertion_tests.cpp vendored Normal file
View File

@ -0,0 +1,61 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <gsl/assert> // for fail_fast (ptr only), Ensures, Expects
using namespace gsl;
namespace
{
static constexpr char deathstring[] = "Expected Death";
int f(int i)
{
Expects(i > 0 && i < 10);
return i;
}
int g(int i)
{
i++;
Ensures(i > 0 && i < 10);
return i;
}
} // namespace
TEST(assertion_tests, expects)
{
std::set_terminate([] {
std::cerr << "Expected Death. expects";
std::abort();
});
EXPECT_TRUE(f(2) == 2);
EXPECT_DEATH(f(10), deathstring);
}
TEST(assertion_tests, ensures)
{
std::set_terminate([] {
std::cerr << "Expected Death. ensures";
std::abort();
});
EXPECT_TRUE(g(2) == 3);
EXPECT_DEATH(g(9), deathstring);
}

135
deps/GSL/tests/at_tests.cpp vendored Normal file
View File

@ -0,0 +1,135 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <gsl/util> // for at
#include <array> // for array
#include <cstddef> // for size_t
#include <initializer_list> // for initializer_list
#include <vector> // for vector
namespace
{
static constexpr char deathstring[] = "Expected Death";
}
TEST(at_tests, static_array)
{
int a[4] = {1, 2, 3, 4};
const int(&c_a)[4] = a;
for (int i = 0; i < 4; ++i) {
EXPECT_TRUE(&gsl::at(a, i) == &a[i]);
EXPECT_TRUE(&gsl::at(c_a, i) == &a[i]);
}
std::set_terminate([] {
std::cerr << "Expected Death. static_array";
std::abort();
});
EXPECT_DEATH(gsl::at(a, -1), deathstring);
EXPECT_DEATH(gsl::at(a, 4), deathstring);
EXPECT_DEATH(gsl::at(c_a, -1), deathstring);
EXPECT_DEATH(gsl::at(c_a, 4), deathstring);
}
TEST(at_tests, std_array)
{
std::array<int, 4> a = {1, 2, 3, 4};
const std::array<int, 4>& c_a = a;
for (int i = 0; i < 4; ++i) {
EXPECT_TRUE(&gsl::at(a, i) == &a[static_cast<std::size_t>(i)]);
EXPECT_TRUE(&gsl::at(c_a, i) == &a[static_cast<std::size_t>(i)]);
}
std::set_terminate([] {
std::cerr << "Expected Death. std_array";
std::abort();
});
EXPECT_DEATH(gsl::at(a, -1), deathstring);
EXPECT_DEATH(gsl::at(a, 4), deathstring);
EXPECT_DEATH(gsl::at(c_a, -1), deathstring);
EXPECT_DEATH(gsl::at(c_a, 4), deathstring);
}
TEST(at_tests, std_vector)
{
std::vector<int> a = {1, 2, 3, 4};
const std::vector<int>& c_a = a;
for (int i = 0; i < 4; ++i) {
EXPECT_TRUE(&gsl::at(a, i) == &a[static_cast<std::size_t>(i)]);
EXPECT_TRUE(&gsl::at(c_a, i) == &a[static_cast<std::size_t>(i)]);
}
std::set_terminate([] {
std::cerr << "Expected Death. std_vector";
std::abort();
});
EXPECT_DEATH(gsl::at(a, -1), deathstring);
EXPECT_DEATH(gsl::at(a, 4), deathstring);
EXPECT_DEATH(gsl::at(c_a, -1), deathstring);
EXPECT_DEATH(gsl::at(c_a, 4), deathstring);
}
TEST(at_tests, InitializerList)
{
const std::initializer_list<int> a = {1, 2, 3, 4};
for (int i = 0; i < 4; ++i) {
EXPECT_TRUE(gsl::at(a, i) == i + 1);
EXPECT_TRUE(gsl::at({1, 2, 3, 4}, i) == i + 1);
}
std::set_terminate([] {
std::cerr << "Expected Death. InitializerList";
std::abort();
});
EXPECT_DEATH(gsl::at(a, -1), deathstring);
EXPECT_DEATH(gsl::at(a, 4), deathstring);
EXPECT_DEATH(gsl::at({1, 2, 3, 4}, -1), deathstring);
EXPECT_DEATH(gsl::at({1, 2, 3, 4}, 4), deathstring);
}
#if !defined(_MSC_VER) || defined(__clang__) || _MSC_VER >= 1910
static constexpr bool test_constexpr()
{
int a1[4] = {1, 2, 3, 4};
const int(&c_a1)[4] = a1;
std::array<int, 4> a2 = {1, 2, 3, 4};
const std::array<int, 4>& c_a2 = a2;
for (int i = 0; i < 4; ++i) {
if (&gsl::at(a1, i) != &a1[i]) return false;
if (&gsl::at(c_a1, i) != &a1[i]) return false;
// requires C++17:
// if (&gsl::at(a2, i) != &a2[static_cast<std::size_t>(i)]) return false;
if (&gsl::at(c_a2, i) != &c_a2[static_cast<std::size_t>(i)]) return false;
if (gsl::at({1, 2, 3, 4}, i) != i + 1) return false;
}
return true;
}
static_assert(test_constexpr(), "FAIL");
#endif

129
deps/GSL/tests/byte_tests.cpp vendored Normal file
View File

@ -0,0 +1,129 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <gsl/byte> // for to_byte, to_integer, byte, operator&, ope...
using namespace std;
using namespace gsl;
namespace
{
int modify_both(gsl::byte& b, int& i)
{
i = 10;
b = to_byte<5>();
return i;
}
TEST(byte_tests, construction)
{
{
const byte b = static_cast<byte>(4);
EXPECT_TRUE(static_cast<unsigned char>(b) == 4);
}
GSL_SUPPRESS(es.49)
{
const byte b = byte(12);
EXPECT_TRUE(static_cast<unsigned char>(b) == 12);
}
{
const byte b = to_byte<12>();
EXPECT_TRUE(static_cast<unsigned char>(b) == 12);
}
{
const unsigned char uc = 12;
const byte b = to_byte(uc);
EXPECT_TRUE(static_cast<unsigned char>(b) == 12);
}
#if defined(__cplusplus) && (__cplusplus >= 201703L)
{
const byte b { 14 };
EXPECT_TRUE(static_cast<unsigned char>(b) == 14);
}
#endif
}
TEST(byte_tests, bitwise_operations)
{
const byte b = to_byte<0xFF>();
byte a = to_byte<0x00>();
EXPECT_TRUE((b | a) == to_byte<0xFF>());
EXPECT_TRUE(a == to_byte<0x00>());
a |= b;
EXPECT_TRUE(a == to_byte<0xFF>());
a = to_byte<0x01>();
EXPECT_TRUE((b & a) == to_byte<0x01>());
a &= b;
EXPECT_TRUE(a == to_byte<0x01>());
EXPECT_TRUE((b ^ a) == to_byte<0xFE>());
EXPECT_TRUE(a == to_byte<0x01>());
a ^= b;
EXPECT_TRUE(a == to_byte<0xFE>());
a = to_byte<0x01>();
EXPECT_TRUE(~a == to_byte<0xFE>());
a = to_byte<0xFF>();
EXPECT_TRUE((a << 4) == to_byte<0xF0>());
EXPECT_TRUE((a >> 4) == to_byte<0x0F>());
a <<= 4;
EXPECT_TRUE(a == to_byte<0xF0>());
a >>= 4;
EXPECT_TRUE(a == to_byte<0x0F>());
}
TEST(byte_tests, to_integer)
{
const byte b = to_byte<0x12>();
EXPECT_TRUE(0x12 == gsl::to_integer<char>(b));
EXPECT_TRUE(0x12 == gsl::to_integer<short>(b));
EXPECT_TRUE(0x12 == gsl::to_integer<long>(b));
EXPECT_TRUE(0x12 == gsl::to_integer<long long>(b));
EXPECT_TRUE(0x12 == gsl::to_integer<unsigned char>(b));
EXPECT_TRUE(0x12 == gsl::to_integer<unsigned short>(b));
EXPECT_TRUE(0x12 == gsl::to_integer<unsigned long>(b));
EXPECT_TRUE(0x12 == gsl::to_integer<unsigned long long>(b));
// EXPECT_TRUE(0x12 == gsl::to_integer<float>(b)); // expect compile-time error
// EXPECT_TRUE(0x12 == gsl::to_integer<double>(b)); // expect compile-time error
}
TEST(byte_tests, aliasing)
{
int i{0};
const int res = modify_both(reinterpret_cast<byte&>(i), i);
EXPECT_TRUE(res == i);
}
}

48
deps/GSL/tests/no_exception_ensure_tests.cpp vendored Normal file

View File

@ -0,0 +1,48 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#include <cstdlib> // for std::exit
#include <gsl/span> // for span
int operator_subscript_no_throw() noexcept
{
int arr[10];
const gsl::span<int> sp{arr};
return sp[11];
}
[[noreturn]] void test_terminate() { std::exit(0); }
void setup_termination_handler() noexcept
{
#if defined(GSL_MSVC_USE_STL_NOEXCEPTION_WORKAROUND)
auto& handler = gsl::details::get_terminate_handler();
handler = &test_terminate;
#else
std::set_terminate(test_terminate);
#endif
}
int main() noexcept
{
setup_termination_handler();
operator_subscript_no_throw();
return -1;
}

550
deps/GSL/tests/notnull_tests.cpp vendored Normal file
View File

@ -0,0 +1,550 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <gsl/pointers> // for not_null, operator<, operator<=, operator>
#include <algorithm> // for addressof
#include <memory> // for shared_ptr, make_shared, operator<, opera...
#include <sstream> // for operator<<, ostringstream, basic_ostream:...
#include <stdint.h> // for uint16_t
#include <string> // for basic_string, operator==, string, operator<<
#include <typeinfo> // for type_info
using namespace gsl;
namespace
{
static constexpr char deathstring[] = "Expected Death";
} //namespace
struct MyBase
{
};
struct MyDerived : public MyBase
{
};
struct Unrelated
{
};
// stand-in for a user-defined ref-counted class
template <typename T>
struct RefCounted
{
RefCounted(T* p) : p_(p) {}
operator T*() { return p_; }
T* p_;
};
// user-defined smart pointer with comparison operators returning a non-bool value
template <typename T>
struct CustomPtr
{
CustomPtr(T* p) : p_(p) {}
operator T*() { return p_; }
bool operator!=(std::nullptr_t) const { return p_ != nullptr; }
T* p_ = nullptr;
};
template <typename T, typename U>
std::string operator==(CustomPtr<T> const& lhs, CustomPtr<U> const& rhs)
{
GSL_SUPPRESS(type.1) // NO-FORMAT: attribute
return reinterpret_cast<const void*>(lhs.p_) == reinterpret_cast<const void*>(rhs.p_) ? "true"
: "false";
}
template <typename T, typename U>
std::string operator!=(CustomPtr<T> const& lhs, CustomPtr<U> const& rhs)
{
GSL_SUPPRESS(type.1) // NO-FORMAT: attribute
return reinterpret_cast<const void*>(lhs.p_) != reinterpret_cast<const void*>(rhs.p_) ? "true"
: "false";
}
template <typename T, typename U>
std::string operator<(CustomPtr<T> const& lhs, CustomPtr<U> const& rhs)
{
GSL_SUPPRESS(type.1) // NO-FORMAT: attribute
return reinterpret_cast<const void*>(lhs.p_) < reinterpret_cast<const void*>(rhs.p_) ? "true"
: "false";
}
template <typename T, typename U>
std::string operator>(CustomPtr<T> const& lhs, CustomPtr<U> const& rhs)
{
GSL_SUPPRESS(type.1) // NO-FORMAT: attribute
return reinterpret_cast<const void*>(lhs.p_) > reinterpret_cast<const void*>(rhs.p_) ? "true"
: "false";
}
template <typename T, typename U>
std::string operator<=(CustomPtr<T> const& lhs, CustomPtr<U> const& rhs)
{
GSL_SUPPRESS(type.1) // NO-FORMAT: attribute
return reinterpret_cast<const void*>(lhs.p_) <= reinterpret_cast<const void*>(rhs.p_) ? "true"
: "false";
}
template <typename T, typename U>
std::string operator>=(CustomPtr<T> const& lhs, CustomPtr<U> const& rhs)
{
GSL_SUPPRESS(type.1) // NO-FORMAT: attribute
return reinterpret_cast<const void*>(lhs.p_) >= reinterpret_cast<const void*>(rhs.p_) ? "true"
: "false";
}
struct NonCopyableNonMovable
{
NonCopyableNonMovable() = default;
NonCopyableNonMovable(const NonCopyableNonMovable&) = delete;
NonCopyableNonMovable& operator=(const NonCopyableNonMovable&) = delete;
NonCopyableNonMovable(NonCopyableNonMovable&&) = delete;
NonCopyableNonMovable& operator=(NonCopyableNonMovable&&) = delete;
};
GSL_SUPPRESS(f.4) // NO-FORMAT: attribute
bool helper(not_null<int*> p) { return *p == 12; }
GSL_SUPPRESS(f.4) // NO-FORMAT: attribute
bool helper_const(not_null<const int*> p) { return *p == 12; }
int* return_pointer() { return nullptr; }
TEST(notnull_tests, TestNotNullConstructors)
{
{
#ifdef CONFIRM_COMPILATION_ERRORS
not_null<int*> p = nullptr; // yay...does not compile!
not_null<std::vector<char>*> p1 = 0; // yay...does not compile!
not_null<int*> p2; // yay...does not compile!
std::unique_ptr<int> up = std::make_unique<int>(120);
not_null<int*> p3 = up;
// Forbid non-nullptr assignable types
not_null<std::vector<int>> f(std::vector<int>{1});
not_null<int> z(10);
not_null<std::vector<int>> y({1, 2});
#endif
}
std::set_terminate([] {
std::cerr << "Expected Death. TestNotNullConstructors";
std::abort();
});
{
// from shared pointer
int i = 12;
auto rp = RefCounted<int>(&i);
not_null<int*> p(rp);
EXPECT_TRUE(p.get() == &i);
not_null<std::shared_ptr<int>> x(
std::make_shared<int>(10)); // shared_ptr<int> is nullptr assignable
int* pi = nullptr;
EXPECT_DEATH((not_null<decltype(pi)>(pi)), deathstring);
}
{
// from pointer to local
int t = 42;
not_null<int*> x = &t;
helper(&t);
helper_const(&t);
EXPECT_TRUE(*x == 42);
}
{
// from raw pointer
// from not_null pointer
int t = 42;
int* p = &t;
not_null<int*> x = p;
helper(p);
helper_const(p);
helper(x);
helper_const(x);
EXPECT_TRUE(*x == 42);
}
{
// from raw const pointer
// from not_null const pointer
int t = 42;
const int* cp = &t;
not_null<const int*> x = cp;
helper_const(cp);
helper_const(x);
EXPECT_TRUE(*x == 42);
}
{
// from not_null const pointer, using auto
int t = 42;
const int* cp = &t;
auto x = not_null<const int*>{cp};
EXPECT_TRUE(*x == 42);
}
{
// from returned pointer
EXPECT_DEATH(helper(return_pointer()), deathstring);
EXPECT_DEATH(helper_const(return_pointer()), deathstring);
}
}
template <typename T>
void ostream_helper(T v)
{
not_null<T*> p(&v);
{
std::ostringstream os;
std::ostringstream ref;
os << static_cast<void*>(p);
ref << static_cast<void*>(&v);
EXPECT_TRUE(os.str() == ref.str());
}
{
std::ostringstream os;
std::ostringstream ref;
os << *p;
ref << v;
EXPECT_TRUE(os.str() == ref.str());
}
}
TEST(notnull_tests, TestNotNullostream)
{
ostream_helper<int>(17);
ostream_helper<float>(21.5f);
ostream_helper<double>(3.4566e-7);
ostream_helper<char>('c');
ostream_helper<uint16_t>(0x0123u);
ostream_helper<const char*>("cstring");
ostream_helper<std::string>("string");
}
TEST(notnull_tests, TestNotNullCasting)
{
MyBase base;
MyDerived derived;
Unrelated unrelated;
not_null<Unrelated*> u{&unrelated};
(void) u;
not_null<MyDerived*> p{&derived};
not_null<MyBase*> q(&base);
q = p; // allowed with heterogeneous copy ctor
EXPECT_TRUE(q == p);
#ifdef CONFIRM_COMPILATION_ERRORS
q = u; // no viable conversion possible between MyBase* and Unrelated*
p = q; // not possible to implicitly convert MyBase* to MyDerived*
not_null<Unrelated*> r = p;
not_null<Unrelated*> s = reinterpret_cast<Unrelated*>(p);
#endif
not_null<Unrelated*> t(reinterpret_cast<Unrelated*>(p.get()));
EXPECT_TRUE(reinterpret_cast<void*>(p.get()) == reinterpret_cast<void*>(t.get()));
}
TEST(notnull_tests, TestNotNullAssignment)
{
std::set_terminate([] {
std::cerr << "Expected Death. TestNotNullAssignment";
std::abort();
});
int i = 12;
not_null<int*> p(&i);
EXPECT_TRUE(helper(p));
int* q = nullptr;
EXPECT_DEATH(p = not_null<int*>(q), deathstring);
}
TEST(notnull_tests, TestNotNullRawPointerComparison)
{
int ints[2] = {42, 43};
int* p1 = &ints[0];
const int* p2 = &ints[1];
using NotNull1 = not_null<decltype(p1)>;
using NotNull2 = not_null<decltype(p2)>;
EXPECT_TRUE((NotNull1(p1) == NotNull1(p1)) == true);
EXPECT_TRUE((NotNull1(p1) == NotNull2(p2)) == false);
EXPECT_TRUE((NotNull1(p1) != NotNull1(p1)) == false);
EXPECT_TRUE((NotNull1(p1) != NotNull2(p2)) == true);
EXPECT_TRUE((NotNull1(p1) < NotNull1(p1)) == false);
EXPECT_TRUE((NotNull1(p1) < NotNull2(p2)) == (p1 < p2));
EXPECT_TRUE((NotNull2(p2) < NotNull1(p1)) == (p2 < p1));
EXPECT_TRUE((NotNull1(p1) > NotNull1(p1)) == false);
EXPECT_TRUE((NotNull1(p1) > NotNull2(p2)) == (p1 > p2));
EXPECT_TRUE((NotNull2(p2) > NotNull1(p1)) == (p2 > p1));
EXPECT_TRUE((NotNull1(p1) <= NotNull1(p1)) == true);
EXPECT_TRUE((NotNull1(p1) <= NotNull2(p2)) == (p1 <= p2));
EXPECT_TRUE((NotNull2(p2) <= NotNull1(p1)) == (p2 <= p1));
}
TEST(notnull_tests, TestNotNullDereferenceOperator)
{
{
auto sp1 = std::make_shared<NonCopyableNonMovable>();
using NotNullSp1 = not_null<decltype(sp1)>;
EXPECT_TRUE(typeid(*sp1) == typeid(*NotNullSp1(sp1)));
EXPECT_TRUE(std::addressof(*NotNullSp1(sp1)) == std::addressof(*sp1));
}
{
int ints[1] = {42};
CustomPtr<int> p1(&ints[0]);
using NotNull1 = not_null<decltype(p1)>;
EXPECT_TRUE(typeid(*NotNull1(p1)) == typeid(*p1));
EXPECT_TRUE(*NotNull1(p1) == 42);
*NotNull1(p1) = 43;
EXPECT_TRUE(ints[0] == 43);
}
{
int v = 42;
gsl::not_null<int*> p(&v);
EXPECT_TRUE(typeid(*p) == typeid(*(&v)));
*p = 43;
EXPECT_TRUE(v == 43);
}
}
TEST(notnull_tests, TestNotNullSharedPtrComparison)
{
auto sp1 = std::make_shared<int>(42);
auto sp2 = std::make_shared<const int>(43);
using NotNullSp1 = not_null<decltype(sp1)>;
using NotNullSp2 = not_null<decltype(sp2)>;
EXPECT_TRUE((NotNullSp1(sp1) == NotNullSp1(sp1)) == true);
EXPECT_TRUE((NotNullSp1(sp1) == NotNullSp2(sp2)) == false);
EXPECT_TRUE((NotNullSp1(sp1) != NotNullSp1(sp1)) == false);
EXPECT_TRUE((NotNullSp1(sp1) != NotNullSp2(sp2)) == true);
EXPECT_TRUE((NotNullSp1(sp1) < NotNullSp1(sp1)) == false);
EXPECT_TRUE((NotNullSp1(sp1) < NotNullSp2(sp2)) == (sp1 < sp2));
EXPECT_TRUE((NotNullSp2(sp2) < NotNullSp1(sp1)) == (sp2 < sp1));
EXPECT_TRUE((NotNullSp1(sp1) > NotNullSp1(sp1)) == false);
EXPECT_TRUE((NotNullSp1(sp1) > NotNullSp2(sp2)) == (sp1 > sp2));
EXPECT_TRUE((NotNullSp2(sp2) > NotNullSp1(sp1)) == (sp2 > sp1));
EXPECT_TRUE((NotNullSp1(sp1) <= NotNullSp1(sp1)) == true);
EXPECT_TRUE((NotNullSp1(sp1) <= NotNullSp2(sp2)) == (sp1 <= sp2));
EXPECT_TRUE((NotNullSp2(sp2) <= NotNullSp1(sp1)) == (sp2 <= sp1));
EXPECT_TRUE((NotNullSp1(sp1) >= NotNullSp1(sp1)) == true);
EXPECT_TRUE((NotNullSp1(sp1) >= NotNullSp2(sp2)) == (sp1 >= sp2));
EXPECT_TRUE((NotNullSp2(sp2) >= NotNullSp1(sp1)) == (sp2 >= sp1));
}
TEST(notnull_tests, TestNotNullCustomPtrComparison)
{
int ints[2] = {42, 43};
CustomPtr<int> p1(&ints[0]);
CustomPtr<const int> p2(&ints[1]);
using NotNull1 = not_null<decltype(p1)>;
using NotNull2 = not_null<decltype(p2)>;
EXPECT_TRUE((NotNull1(p1) == NotNull1(p1)) == "true");
EXPECT_TRUE((NotNull1(p1) == NotNull2(p2)) == "false");
EXPECT_TRUE((NotNull1(p1) != NotNull1(p1)) == "false");
EXPECT_TRUE((NotNull1(p1) != NotNull2(p2)) == "true");
EXPECT_TRUE((NotNull1(p1) < NotNull1(p1)) == "false");
EXPECT_TRUE((NotNull1(p1) < NotNull2(p2)) == (p1 < p2));
EXPECT_TRUE((NotNull2(p2) < NotNull1(p1)) == (p2 < p1));
EXPECT_TRUE((NotNull1(p1) > NotNull1(p1)) == "false");
EXPECT_TRUE((NotNull1(p1) > NotNull2(p2)) == (p1 > p2));
EXPECT_TRUE((NotNull2(p2) > NotNull1(p1)) == (p2 > p1));
EXPECT_TRUE((NotNull1(p1) <= NotNull1(p1)) == "true");
EXPECT_TRUE((NotNull1(p1) <= NotNull2(p2)) == (p1 <= p2));
EXPECT_TRUE((NotNull2(p2) <= NotNull1(p1)) == (p2 <= p1));
EXPECT_TRUE((NotNull1(p1) >= NotNull1(p1)) == "true");
EXPECT_TRUE((NotNull1(p1) >= NotNull2(p2)) == (p1 >= p2));
EXPECT_TRUE((NotNull2(p2) >= NotNull1(p1)) == (p2 >= p1));
}
#if defined(__cplusplus) && (__cplusplus >= 201703L)
TEST(notnull_tests, TestNotNullConstructorTypeDeduction)
{
{
int i = 42;
not_null x{&i};
helper(not_null{&i});
helper_const(not_null{&i});
EXPECT_TRUE(*x == 42);
}
{
int i = 42;
int* p = &i;
not_null x{p};
helper(not_null{p});
helper_const(not_null{p});
EXPECT_TRUE(*x == 42);
}
std::set_terminate([] {
std::cerr << "Expected Death. TestNotNullConstructorTypeDeduction";
std::abort();
});
{
auto workaround_macro = []() {
int* p1 = nullptr;
const not_null x{p1};
};
EXPECT_DEATH(workaround_macro(), deathstring);
}
{
auto workaround_macro = []() {
const int* p1 = nullptr;
const not_null x{p1};
};
EXPECT_DEATH(workaround_macro(), deathstring);
}
{
int* p = nullptr;
EXPECT_DEATH(helper(not_null{p}), deathstring);
EXPECT_DEATH(helper_const(not_null{p}), deathstring);
}
#ifdef CONFIRM_COMPILATION_ERRORS
{
not_null x{nullptr};
helper(not_null{nullptr});
helper_const(not_null{nullptr});
}
#endif
}
#endif // #if defined(__cplusplus) && (__cplusplus >= 201703L)
TEST(notnull_tests, TestMakeNotNull)
{
{
int i = 42;
const auto x = make_not_null(&i);
helper(make_not_null(&i));
helper_const(make_not_null(&i));
EXPECT_TRUE(*x == 42);
}
{
int i = 42;
int* p = &i;
const auto x = make_not_null(p);
helper(make_not_null(p));
helper_const(make_not_null(p));
EXPECT_TRUE(*x == 42);
}
std::set_terminate([] {
std::cerr << "Expected Death. TestMakeNotNull";
std::abort();
});
{
const auto workaround_macro = []() {
int* p1 = nullptr;
const auto x = make_not_null(p1);
EXPECT_TRUE(*x == 42);
};
EXPECT_DEATH(workaround_macro(), deathstring);
}
{
const auto workaround_macro = []() {
const int* p1 = nullptr;
const auto x = make_not_null(p1);
EXPECT_TRUE(*x == 42);
};
EXPECT_DEATH(workaround_macro(), deathstring);
}
{
int* p = nullptr;
EXPECT_DEATH(helper(make_not_null(p)), deathstring);
EXPECT_DEATH(helper_const(make_not_null(p)), deathstring);
}
#ifdef CONFIRM_COMPILATION_ERRORS
{
EXPECT_DEATH(make_not_null(nullptr), deathstring);
EXPECT_DEATH(helper(make_not_null(nullptr)), deathstring);
EXPECT_DEATH(helper_const(make_not_null(nullptr)), deathstring);
}
#endif
}
TEST(notnull_tests, TestStdHash)
{
int x = 42;
int y = 99;
not_null<int*> nn{&x};
const not_null<int*> cnn{&x};
std::hash<not_null<int*>> hash_nn;
std::hash<int*> hash_intptr;
EXPECT_TRUE(hash_nn(nn) == hash_intptr(&x));
EXPECT_FALSE(hash_nn(nn) == hash_intptr(&y));
EXPECT_FALSE(hash_nn(nn) == hash_intptr(nullptr));
}

43
deps/GSL/tests/owner_tests.cpp vendored Normal file
View File

@ -0,0 +1,43 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <gsl/pointers> // for owner
using namespace gsl;
GSL_SUPPRESS(f.23) // NO-FORMAT: attribute
void f(int* i) { *i += 1; }
TEST(owner_tests, basic_test)
{
owner<int*> p = new int(120);
EXPECT_TRUE(*p == 120);
f(p);
EXPECT_TRUE(*p == 121);
delete p;
}
TEST(owner_tests, check_pointer_constraint)
{
#ifdef CONFIRM_COMPILATION_ERRORS
{
owner<int> integerTest = 10;
owner<std::shared_ptr<int>> sharedPtrTest(new int(10));
}
#endif
}

File diff suppressed because it is too large

360
deps/GSL/tests/span_ext_tests.cpp vendored Normal file
View File

@ -0,0 +1,360 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <gsl/util> // for narrow_cast, at
#include <gsl/span_ext> // for operator==, operator!=, make_span
#include <array> // for array
#include <iostream> // for cerr
#include <vector> // for vector
using namespace std;
using namespace gsl;
namespace
{
static constexpr char deathstring[] = "Expected Death";
} // namespace
TEST(span_ext_test, make_span_from_pointer_length_constructor)
{
std::set_terminate([] {
std::cerr << "Expected Death. from_pointer_length_constructor";
std::abort();
});
int arr[4] = {1, 2, 3, 4};
{
auto s = make_span(&arr[0], 2);
EXPECT_TRUE(s.size() == 2);
EXPECT_TRUE(s.data() == &arr[0]);
EXPECT_TRUE(s[0] == 1);
EXPECT_TRUE(s[1] == 2);
}
{
int* p = nullptr;
auto s = make_span(p, narrow_cast<span<int>::size_type>(0));
EXPECT_TRUE(s.size() == 0);
EXPECT_TRUE(s.data() == nullptr);
}
{
int* p = nullptr;
auto workaround_macro = [=]() { make_span(p, 2); };
EXPECT_DEATH(workaround_macro(), deathstring);
}
}
TEST(span_ext_test, make_span_from_pointer_pointer_construction)
{
int arr[4] = {1, 2, 3, 4};
{
auto s = make_span(&arr[0], &arr[2]);
EXPECT_TRUE(s.size() == 2);
EXPECT_TRUE(s.data() == &arr[0]);
EXPECT_TRUE(s[0] == 1);
EXPECT_TRUE(s[1] == 2);
}
{
auto s = make_span(&arr[0], &arr[0]);
EXPECT_TRUE(s.size() == 0);
EXPECT_TRUE(s.data() == &arr[0]);
}
{
int* p = nullptr;
auto s = make_span(p, p);
EXPECT_TRUE(s.size() == 0);
EXPECT_TRUE(s.data() == nullptr);
}
}
TEST(span_ext_test, make_span_from_array_constructor)
{
int arr[5] = {1, 2, 3, 4, 5};
int arr2d[2][3] = {1, 2, 3, 4, 5, 6};
int arr3d[2][3][2] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
{
const auto s = make_span(arr);
EXPECT_TRUE(s.size() == 5);
EXPECT_TRUE(s.data() == std::addressof(arr[0]));
}
{
const auto s = make_span(std::addressof(arr2d[0]), 1);
EXPECT_TRUE(s.size() == 1);
EXPECT_TRUE(s.data() == std::addressof(arr2d[0]));
}
{
const auto s = make_span(std::addressof(arr3d[0]), 1);
EXPECT_TRUE(s.size() == 1);
EXPECT_TRUE(s.data() == std::addressof(arr3d[0]));
}
}
TEST(span_ext_test, make_span_from_dynamic_array_constructor)
{
double(*arr)[3][4] = new double[100][3][4];
{
auto s = make_span(&arr[0][0][0], 10);
EXPECT_TRUE(s.size() == 10);
EXPECT_TRUE(s.data() == &arr[0][0][0]);
}
delete[] arr;
}
TEST(span_ext_test, make_span_from_std_array_constructor)
{
std::array<int, 4> arr = {1, 2, 3, 4};
{
auto s = make_span(arr);
EXPECT_TRUE(s.size() == arr.size());
EXPECT_TRUE(s.data() == arr.data());
}
// This test checks for the bug found in gcc 6.1, 6.2, 6.3, 6.4, 6.5 7.1, 7.2, 7.3 - issue #590
{
span<int> s1 = make_span(arr);
static span<int> s2;
s2 = s1;
#if defined(__GNUC__) && __GNUC__ == 6 && (__GNUC_MINOR__ == 4 || __GNUC_MINOR__ == 5) && \
__GNUC_PATCHLEVEL__ == 0 && defined(__OPTIMIZE__)
// Known to be broken in gcc 6.4 and 6.5 with optimizations
// Issue in gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83116
EXPECT_TRUE(s1.size() == 4);
EXPECT_TRUE(s2.size() == 0);
#else
EXPECT_TRUE(s1.size() == s2.size());
#endif
}
}
TEST(span_ext_test, make_span_from_const_std_array_constructor)
{
const std::array<int, 4> arr = {1, 2, 3, 4};
{
auto s = make_span(arr);
EXPECT_TRUE(s.size() == arr.size());
EXPECT_TRUE(s.data() == arr.data());
}
}
TEST(span_ext_test, make_span_from_std_array_const_constructor)
{
std::array<const int, 4> arr = {1, 2, 3, 4};
{
auto s = make_span(arr);
EXPECT_TRUE(s.size() == arr.size());
EXPECT_TRUE(s.data() == arr.data());
}
}
TEST(span_ext_test, make_span_from_container_constructor)
{
std::vector<int> v = {1, 2, 3};
const std::vector<int> cv = v;
{
auto s = make_span(v);
EXPECT_TRUE(s.size() == v.size());
EXPECT_TRUE(s.data() == v.data());
auto cs = make_span(cv);
EXPECT_TRUE(cs.size() == cv.size());
EXPECT_TRUE(cs.data() == cv.data());
}
}
TEST(span_test, interop_with_gsl_at)
{
int arr[5] = {1, 2, 3, 4, 5};
span<int> s{arr};
EXPECT_TRUE(at(s, 0) == 1);
EXPECT_TRUE(at(s, 1) == 2);
}
TEST(span_ext_test, iterator_free_functions)
{
int a[] = {1, 2, 3, 4};
span<int> s{a};
EXPECT_TRUE((std::is_same<decltype(s.begin()), decltype(begin(s))>::value));
EXPECT_TRUE((std::is_same<decltype(s.end()), decltype(end(s))>::value));
EXPECT_TRUE((std::is_same<decltype(std::cbegin(s)), decltype(cbegin(s))>::value));
EXPECT_TRUE((std::is_same<decltype(std::cend(s)), decltype(cend(s))>::value));
EXPECT_TRUE((std::is_same<decltype(s.rbegin()), decltype(rbegin(s))>::value));
EXPECT_TRUE((std::is_same<decltype(s.rend()), decltype(rend(s))>::value));
EXPECT_TRUE((std::is_same<decltype(std::crbegin(s)), decltype(crbegin(s))>::value));
EXPECT_TRUE((std::is_same<decltype(std::crend(s)), decltype(crend(s))>::value));
EXPECT_TRUE(s.begin() == begin(s));
EXPECT_TRUE(s.end() == end(s));
EXPECT_TRUE(s.rbegin() == rbegin(s));
EXPECT_TRUE(s.rend() == rend(s));
EXPECT_TRUE(s.begin() == cbegin(s));
EXPECT_TRUE(s.end() == cend(s));
EXPECT_TRUE(s.rbegin() == crbegin(s));
EXPECT_TRUE(s.rend() == crend(s));
}
TEST(span_ext_test, ssize_free_function)
{
int a[] = {1, 2, 3, 4};
span<int> s{a};
EXPECT_FALSE((std::is_same<decltype(s.size()), decltype(ssize(s))>::value));
EXPECT_TRUE(s.size() == static_cast<std::size_t>(ssize(s)));
}
TEST(span_ext_test, comparison_operators)
{
{
span<int> s1;
span<int> s2;
EXPECT_TRUE(s1 == s2);
EXPECT_FALSE(s1 != s2);
EXPECT_FALSE(s1 < s2);
EXPECT_TRUE(s1 <= s2);
EXPECT_FALSE(s1 > s2);
EXPECT_TRUE(s1 >= s2);
EXPECT_TRUE(s2 == s1);
EXPECT_FALSE(s2 != s1);
EXPECT_FALSE(s2 < s1);
EXPECT_TRUE(s2 <= s1);
EXPECT_FALSE(s2 > s1);
EXPECT_TRUE(s2 >= s1);
}
{
int arr[] = {2, 1};
span<int> s1 = arr;
span<int> s2 = arr;
EXPECT_TRUE(s1 == s2);
EXPECT_FALSE(s1 != s2);
EXPECT_FALSE(s1 < s2);
EXPECT_TRUE(s1 <= s2);
EXPECT_FALSE(s1 > s2);
EXPECT_TRUE(s1 >= s2);
EXPECT_TRUE(s2 == s1);
EXPECT_FALSE(s2 != s1);
EXPECT_FALSE(s2 < s1);
EXPECT_TRUE(s2 <= s1);
EXPECT_FALSE(s2 > s1);
EXPECT_TRUE(s2 >= s1);
}
{
int arr[] = {2, 1}; // bigger
span<int> s1;
span<int> s2 = arr;
EXPECT_TRUE(s1 != s2);
EXPECT_TRUE(s2 != s1);
EXPECT_FALSE(s1 == s2);
EXPECT_FALSE(s2 == s1);
EXPECT_TRUE(s1 < s2);
EXPECT_FALSE(s2 < s1);
EXPECT_TRUE(s1 <= s2);
EXPECT_FALSE(s2 <= s1);
EXPECT_TRUE(s2 > s1);
EXPECT_FALSE(s1 > s2);
EXPECT_TRUE(s2 >= s1);
EXPECT_FALSE(s1 >= s2);
}
{
int arr1[] = {1, 2};
int arr2[] = {1, 2};
span<int> s1 = arr1;
span<int> s2 = arr2;
EXPECT_TRUE(s1 == s2);
EXPECT_FALSE(s1 != s2);
EXPECT_FALSE(s1 < s2);
EXPECT_TRUE(s1 <= s2);
EXPECT_FALSE(s1 > s2);
EXPECT_TRUE(s1 >= s2);
EXPECT_TRUE(s2 == s1);
EXPECT_FALSE(s2 != s1);
EXPECT_FALSE(s2 < s1);
EXPECT_TRUE(s2 <= s1);
EXPECT_FALSE(s2 > s1);
EXPECT_TRUE(s2 >= s1);
}
{
int arr[] = {1, 2, 3};
span<int> s1 = {&arr[0], 2}; // shorter
span<int> s2 = arr; // longer
EXPECT_TRUE(s1 != s2);
EXPECT_TRUE(s2 != s1);
EXPECT_FALSE(s1 == s2);
EXPECT_FALSE(s2 == s1);
EXPECT_TRUE(s1 < s2);
EXPECT_FALSE(s2 < s1);
EXPECT_TRUE(s1 <= s2);
EXPECT_FALSE(s2 <= s1);
EXPECT_TRUE(s2 > s1);
EXPECT_FALSE(s1 > s2);
EXPECT_TRUE(s2 >= s1);
EXPECT_FALSE(s1 >= s2);
}
{
int arr1[] = {1, 2}; // smaller
int arr2[] = {2, 1}; // bigger
span<int> s1 = arr1;
span<int> s2 = arr2;
EXPECT_TRUE(s1 != s2);
EXPECT_TRUE(s2 != s1);
EXPECT_FALSE(s1 == s2);
EXPECT_FALSE(s2 == s1);
EXPECT_TRUE(s1 < s2);
EXPECT_FALSE(s2 < s1);
EXPECT_TRUE(s1 <= s2);
EXPECT_FALSE(s2 <= s1);
EXPECT_TRUE(s2 > s1);
EXPECT_FALSE(s1 > s2);
EXPECT_TRUE(s2 >= s1);
EXPECT_FALSE(s1 >= s2);
}
}

1278
deps/GSL/tests/span_tests.cpp vendored Normal file

File diff suppressed because it is too large

190
deps/GSL/tests/strict_notnull_tests.cpp vendored Normal file
View File

@ -0,0 +1,190 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <gsl/pointers> // for not_null, operator<, operator<=, operator>
#include <iostream> // for cerr (used by the death-test terminate handler)
namespace gsl
{
struct fail_fast;
} // namespace gsl
using namespace gsl;
GSL_SUPPRESS(f.4) // NO-FORMAT: attribute
bool helper(not_null<int*> p) { return *p == 12; }
GSL_SUPPRESS(f.4) // NO-FORMAT: attribute
bool helper_const(not_null<const int*> p) { return *p == 12; }
GSL_SUPPRESS(f.4) // NO-FORMAT: attribute
bool strict_helper(strict_not_null<int*> p) { return *p == 12; }
GSL_SUPPRESS(f.4) // NO-FORMAT: attribute
bool strict_helper_const(strict_not_null<const int*> p) { return *p == 12; }
int* return_pointer() { return nullptr; }
const int* return_pointer_const() { return nullptr; }
TEST(strict_notnull_tests, TestStrictNotNull)
{
{
// raw ptr <-> strict_not_null
int x = 42;
#ifdef CONFIRM_COMPILATION_ERRORS
strict_not_null<int*> snn = &x;
strict_helper(&x);
strict_helper_const(&x);
strict_helper(return_pointer());
strict_helper_const(return_pointer_const());
#endif
const strict_not_null<int*> snn1{&x};
helper(snn1);
helper_const(snn1);
EXPECT_TRUE(*snn1 == 42);
}
{
// strict_not_null -> strict_not_null
int x = 42;
strict_not_null<int*> snn1{&x};
const strict_not_null<int*> snn2{&x};
strict_helper(snn1);
strict_helper_const(snn1);
strict_helper_const(snn2);
EXPECT_TRUE(snn1 == snn2);
}
{
// strict_not_null -> not_null
int x = 42;
strict_not_null<int*> snn{&x};
const not_null<int*> nn1 = snn;
const not_null<int*> nn2{snn};
helper(snn);
helper_const(snn);
EXPECT_TRUE(snn == nn1);
EXPECT_TRUE(snn == nn2);
}
{
// not_null -> strict_not_null
int x = 42;
not_null<int*> nn{&x};
const strict_not_null<int*> snn1{nn};
const strict_not_null<int*> snn2{nn};
strict_helper(nn);
strict_helper_const(nn);
EXPECT_TRUE(snn1 == nn);
EXPECT_TRUE(snn2 == nn);
std::hash<strict_not_null<int*>> hash_snn;
std::hash<not_null<int*>> hash_nn;
EXPECT_TRUE(hash_nn(snn1) == hash_nn(nn));
EXPECT_TRUE(hash_snn(snn1) == hash_nn(nn));
EXPECT_TRUE(hash_nn(snn1) == hash_nn(snn2));
EXPECT_TRUE(hash_snn(snn1) == hash_snn(nn));
}
#ifdef CONFIRM_COMPILATION_ERRORS
{
strict_not_null<int*> p{nullptr};
}
#endif
}
#if defined(__cplusplus) && (__cplusplus >= 201703L)
namespace
{
static constexpr char deathstring[] = "Expected Death";
}
TEST(strict_notnull_tests, TestStrictNotNullConstructorTypeDeduction)
{
std::set_terminate([] {
std::cerr << "Expected Death. TestStrictNotNullConstructorTypeDeduction";
std::abort();
});
{
int i = 42;
strict_not_null x{&i};
helper(strict_not_null{&i});
helper_const(strict_not_null{&i});
EXPECT_TRUE(*x == 42);
}
{
int i = 42;
int* p = &i;
strict_not_null x{p};
helper(strict_not_null{p});
helper_const(strict_not_null{p});
EXPECT_TRUE(*x == 42);
}
{
auto workaround_macro = []() {
int* p1 = nullptr;
const strict_not_null x{p1};
};
EXPECT_DEATH(workaround_macro(), deathstring);
}
{
auto workaround_macro = []() {
const int* p1 = nullptr;
const strict_not_null x{p1};
};
EXPECT_DEATH(workaround_macro(), deathstring);
}
{
int* p = nullptr;
EXPECT_DEATH(helper(strict_not_null{p}), deathstring);
EXPECT_DEATH(helper_const(strict_not_null{p}), deathstring);
}
#ifdef CONFIRM_COMPILATION_ERRORS
{
strict_not_null x{nullptr};
helper(strict_not_null{nullptr});
helper_const(strict_not_null{nullptr});
}
#endif
}
#endif // #if defined(__cplusplus) && (__cplusplus >= 201703L)

1217
deps/GSL/tests/string_span_tests.cpp vendored Normal file

File diff suppressed because it is too large

147
deps/GSL/tests/utils_tests.cpp vendored Normal file
View File

@ -0,0 +1,147 @@
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <gsl/util> // finally, narrow_cast
#include <gsl/narrow> // for narrow, narrowing_error
#include <algorithm> // for move
#include <functional> // for reference_wrapper, _Bind_helper<>::type
#include <limits> // for numeric_limits
#include <stdint.h> // for uint32_t, int32_t
#include <type_traits> // for is_same
#include <cstddef> // for std::ptrdiff_t
using namespace gsl;
namespace
{
void f(int& i) { i += 1; }
static int j = 0;
void g() { j += 1; }
}
TEST(utils_tests, sanity_check_for_gsl_index_typedef)
{
static_assert(std::is_same<gsl::index, std::ptrdiff_t>::value,
"gsl::index represents wrong arithmetic type");
}
TEST(utils_tests, finally_lambda)
{
int i = 0;
{
auto _ = finally([&]() { f(i); });
EXPECT_TRUE(i == 0);
}
EXPECT_TRUE(i == 1);
}
TEST(utils_tests, finally_lambda_move)
{
int i = 0;
{
auto _1 = finally([&]() { f(i); });
{
auto _2 = std::move(_1);
EXPECT_TRUE(i == 0);
}
EXPECT_TRUE(i == 1);
{
auto _2 = std::move(_1);
EXPECT_TRUE(i == 1);
}
EXPECT_TRUE(i == 1);
}
EXPECT_TRUE(i == 1);
}
TEST(utils_tests, finally_const_lvalue_lambda)
{
int i = 0;
{
const auto const_lvalue_lambda = [&]() { f(i); };
auto _ = finally(const_lvalue_lambda);
EXPECT_TRUE(i == 0);
}
EXPECT_TRUE(i == 1);
}
TEST(utils_tests, finally_mutable_lvalue_lambda)
{
int i = 0;
{
auto mutable_lvalue_lambda = [&]() { f(i); };
auto _ = finally(mutable_lvalue_lambda);
EXPECT_TRUE(i == 0);
}
EXPECT_TRUE(i == 1);
}
TEST(utils_tests, finally_function_with_bind)
{
int i = 0;
{
auto _ = finally(std::bind(&f, std::ref(i)));
EXPECT_TRUE(i == 0);
}
EXPECT_TRUE(i == 1);
}
TEST(utils_tests, finally_function_ptr)
{
j = 0;
{
auto _ = finally(&g);
EXPECT_TRUE(j == 0);
}
EXPECT_TRUE(j == 1);
}
TEST(utils_tests, narrow_cast)
{
int n = 120;
char c = narrow_cast<char>(n);
EXPECT_TRUE(c == 120);
n = 300;
unsigned char uc = narrow_cast<unsigned char>(n);
EXPECT_TRUE(uc == 44);
}
TEST(utils_tests, narrow)
{
int n = 120;
const char c = narrow<char>(n);
EXPECT_TRUE(c == 120);
n = 300;
EXPECT_THROW(narrow<char>(n), narrowing_error);
const auto int32_max = std::numeric_limits<int32_t>::max();
const auto int32_min = std::numeric_limits<int32_t>::min();
EXPECT_TRUE(narrow<uint32_t>(int32_t(0)) == 0);
EXPECT_TRUE(narrow<uint32_t>(int32_t(1)) == 1);
EXPECT_TRUE(narrow<uint32_t>(int32_max) == static_cast<uint32_t>(int32_max));
EXPECT_THROW(narrow<uint32_t>(int32_t(-1)), narrowing_error);
EXPECT_THROW(narrow<uint32_t>(int32_min), narrowing_error);
n = -42;
EXPECT_THROW(narrow<unsigned>(n), narrowing_error);
}

10
deps/asmjit/.editorconfig vendored Normal file
View File

@ -0,0 +1,10 @@
# Editor configuration, see https://editorconfig.org for more details.
root = true
[*.{cpp,h,natvis}]
charset = utf-8
end_of_line = lf
indent_style = space
indent_size = 2
insert_final_newline = true
trim_trailing_whitespace = true

1
deps/asmjit/.github/FUNDING.yml vendored Normal file
View File

@ -0,0 +1 @@
github: kobalicek

39
deps/asmjit/.github/workflows/build-config.json vendored Normal File
View File

@ -0,0 +1,39 @@
{
"diagnostics": {
"asan": { "definitions": ["ASMJIT_SANITIZE=address"] },
"ubsan": { "definitions": ["ASMJIT_SANITIZE=undefined"] }
},
"valgrind_arguments": [
"--leak-check=full",
"--show-reachable=yes",
"--track-origins=yes"
],
"tests": [
{
"cmd": ["asmjit_test_unit", "--quick"],
"optional": true
},
{
"cmd": ["asmjit_test_opcode", "--quiet"],
"optional": true
},
{
"cmd": ["asmjit_test_x86_asm"],
"optional": true
},
{
"cmd": ["asmjit_test_x86_sections"],
"optional": true
},
{
"cmd": ["asmjit_test_x86_instinfo"],
"optional": true
},
{
"cmd": ["asmjit_test_compiler"],
"optional": true
}
]
}

136
deps/asmjit/.github/workflows/build.yml vendored Normal file
View File

@ -0,0 +1,136 @@
name: "Build"
on:
push:
pull_request:
defaults:
run:
shell: bash
jobs:
source-check:
name: "source check"
runs-on: ubuntu-latest
steps:
- name: "Checkout"
uses: actions/checkout@v2
- name: "Setup node.js"
uses: actions/setup-node@v1
with:
node-version: "14"
- name: "Check Enumerations"
run: |
cd tools
node enumgen.js --verify
build:
strategy:
fail-fast: false
matrix:
include:
- { title: "linux-lib" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", problem_matcher: "cpp" }
- { title: "windows-lib" , os: "windows-latest", cc: "vs2019" , arch: "x86", build_type: "Debug" , problem_matcher: "cpp" }
- { title: "diag-asan" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON", diagnostics: "address" }
- { title: "diag-ubsan" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON", diagnostics: "undefined" }
- { title: "diag-valgrind" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON", diagnostics: "valgrind" }
- { title: "no-deprecated" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON,ASMJIT_NO_DEPRECATED=1" }
- { title: "no-intrinsics" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON,ASMJIT_NO_INTRINSICS=1" }
- { title: "no-logging" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON,ASMJIT_NO_LOGGING=1" }
- { title: "no-builder" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON,ASMJIT_NO_BUILDER=1" }
- { title: "no-compiler" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON,ASMJIT_NO_COMPILER=1" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-4.8" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-4.8" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-4.8" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-4.8" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-5" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-5" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-6" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-6" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-7" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-7" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-8" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-8" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-9" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-9" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-10" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-10" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-10" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc-10" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang-9" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang-9" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang-9" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang-9" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang-10", arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang-10", arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang-10", arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang-10", arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "osx-10.15" , os: "macos-10.15" , cc: "gcc-9" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "osx-10.15" , os: "macos-10.15" , cc: "gcc-9" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "osx-10.15" , os: "macos-10.15" , cc: "clang" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "osx-10.15" , os: "macos-10.15" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "osx-11.0" , os: "macos-11.0" , cc: "gcc-9" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "osx-11.0" , os: "macos-11.0" , cc: "gcc-9" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "osx-11.0" , os: "macos-11.0" , cc: "clang" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "osx-11.0" , os: "macos-11.0" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-latest", cc: "vs2019" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-latest", cc: "vs2019" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-latest", cc: "vs2019" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-latest", cc: "vs2019" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
name: "${{matrix.title}} (${{matrix.cc}}, ${{matrix.arch}}, ${{matrix.build_type}})"
runs-on: "${{matrix.os}}"
steps:
- name: "Checkout"
uses: actions/checkout@v2
with:
path: "source"
- name: "Checkout build-actions"
run: git clone https://github.com/build-actions/build-actions.git build-actions --depth=1
- name: "Python"
uses: actions/setup-python@v2
with:
python-version: "3.x"
- name: "Prepare"
run: python build-actions/action.py
--step=prepare
--compiler=${{matrix.cc}}
--diagnostics=${{matrix.diagnostics}}
--architecture=${{matrix.arch}}
- name: "Configure"
run: python build-actions/action.py
--step=configure
--config=source/.github/workflows/build-config.json
--source-dir=source
--compiler=${{matrix.cc}}
--diagnostics=${{matrix.diagnostics}}
--architecture=${{matrix.arch}}
--build-type=${{matrix.build_type}}
--build-defs=${{matrix.defs}}
--problem-matcher=${{matrix.problem_matcher}}
- name: "Build"
run: python build-actions/action.py --step=build
- name: "Test"
run: python build-actions/action.py --step=test

6
deps/asmjit/.gitignore vendored Normal file
View File

@ -0,0 +1,6 @@
.vscode
.kdev4
*.kdev4
build
build_*
tools/asmdb

549
deps/asmjit/CMakeLists.txt vendored Normal file
View File

@ -0,0 +1,549 @@
cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
cmake_policy(PUSH)
if(POLICY CMP0063)
cmake_policy(SET CMP0063 NEW) # Honor visibility properties.
endif()
if(POLICY CMP0092)
cmake_policy(SET CMP0092 NEW) # Don't add -W3 warning level by default.
endif()
# Don't create a project if it was already created by another CMakeLists.txt.
# This allows one library to embed another library without making a collision.
if (NOT CMAKE_PROJECT_NAME OR "${CMAKE_PROJECT_NAME}" STREQUAL "asmjit")
project(asmjit CXX)
endif()
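# Illustration only (not part of the upstream file): a parent project that embeds
# this library typically does something like the following, where `myapp` is a
# hypothetical consumer target:
#
#   set(ASMJIT_STATIC TRUE)                              # link AsmJit statically
#   add_subdirectory(deps/asmjit)                        # path used in this repository
#   target_link_libraries(myapp PRIVATE asmjit::asmjit)  # alias defined further below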
include(CheckCXXCompilerFlag)
include(GNUInstallDirs)
# =============================================================================
# [AsmJit - Deprecated]
# =============================================================================
if (DEFINED ASMJIT_BUILD_EMBED)
message(DEPRECATION "ASMJIT_BUILD_EMBED is deprecated, use ASMJIT_EMBED")
set(ASMJIT_EMBED "${ASMJIT_BUILD_EMBED}")
endif()
if (DEFINED ASMJIT_BUILD_STATIC)
message(DEPRECATION "ASMJIT_BUILD_STATIC is deprecated, use ASMJIT_STATIC")
set(ASMJIT_STATIC "${ASMJIT_BUILD_STATIC}")
endif()
# =============================================================================
# [AsmJit - Configuration]
# =============================================================================
if (NOT DEFINED ASMJIT_EMBED)
set(ASMJIT_EMBED FALSE)
endif()
if (NOT DEFINED ASMJIT_STATIC)
set(ASMJIT_STATIC ${ASMJIT_EMBED})
endif()
if (NOT DEFINED ASMJIT_BUILD_X86)
set(ASMJIT_BUILD_X86 FALSE)
endif()
if (NOT DEFINED ASMJIT_TEST)
set(ASMJIT_TEST FALSE)
endif()
if (NOT DEFINED ASMJIT_NO_NATVIS)
set(ASMJIT_NO_NATVIS FALSE)
endif()
# EMBED implies STATIC.
if (ASMJIT_EMBED AND NOT ASMJIT_STATIC)
set(ASMJIT_STATIC TRUE)
endif()
set(ASMJIT_DIR "${CMAKE_CURRENT_LIST_DIR}" CACHE PATH "Location of 'asmjit'")
set(ASMJIT_TEST ${ASMJIT_TEST} CACHE BOOL "Build 'asmjit' test applications")
set(ASMJIT_EMBED ${ASMJIT_EMBED} CACHE BOOL "Embed 'asmjit' library (no targets)")
set(ASMJIT_STATIC ${ASMJIT_STATIC} CACHE BOOL "Build 'asmjit' library as static")
set(ASMJIT_SANITIZE ${ASMJIT_SANITIZE} CACHE STRING "Build with sanitizers: 'address', 'undefined', etc...")
set(ASMJIT_BUILD_X86 ${ASMJIT_BUILD_X86} CACHE BOOL "Build X86 backends (X86 and X86_64)")
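# For reference only (not consumed by this file): these cache options are usually
# passed on the configure command line, e.g.:
#
#   cmake -DASMJIT_TEST=ON -DASMJIT_SANITIZE=address,undefined path/to/asmjit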
# =============================================================================
# [AsmJit - Project]
# =============================================================================
set(ASMJIT_INCLUDE_DIRS "${ASMJIT_DIR}/src") # Include directory is the same as source dir.
set(ASMJIT_DEPS "") # AsmJit dependencies (libraries) for the linker.
set(ASMJIT_LIBS "") # Dependencies of libs/apps that want to use AsmJit.
set(ASMJIT_CFLAGS "") # Public compiler flags.
set(ASMJIT_PRIVATE_CFLAGS "") # Private compiler flags independent of build type.
set(ASMJIT_PRIVATE_CFLAGS_DBG "") # Private compiler flags used by debug builds.
set(ASMJIT_PRIVATE_CFLAGS_REL "") # Private compiler flags used by release builds.
set(ASMJIT_SANITIZE_CFLAGS "") # Compiler flags required by currently enabled sanitizers.
set(ASMJIT_SANITIZE_LFLAGS "") # Linker flags required by currently enabled sanitizers.
# =============================================================================
# [AsmJit - Utilities]
# =============================================================================
function(asmjit_detect_cflags out)
set(out_array ${${out}})
foreach(flag ${ARGN})
string(REGEX REPLACE "[+]" "x" flag_signature "${flag}")
string(REGEX REPLACE "[-=:;/.\]" "_" flag_signature "${flag_signature}")
check_cxx_compiler_flag(${flag} "__CxxFlag_${flag_signature}")
if (${__CxxFlag_${flag_signature}})
list(APPEND out_array "${flag}")
endif()
endforeach()
set(${out} "${out_array}" PARENT_SCOPE)
endfunction()
# Support for various sanitizers provided by C/C++ compilers.
function(asmjit_detect_sanitizers out)
set(_out_array ${${out}})
set(_flags "")
foreach(_arg ${ARGN})
string(REPLACE "," ";" _arg "${_arg}")
list(APPEND _flags ${_arg})
endforeach()
foreach(_flag ${_flags})
if (NOT "${_flag}" MATCHES "^-fsanitize=")
SET(_flag "-fsanitize=${_flag}")
endif()
# Sanitizers also require link flags, see CMAKE_REQUIRED_FLAGS.
set(CMAKE_REQUIRED_FLAGS "${_flag}")
asmjit_detect_cflags(_out_array ${_flag})
unset(CMAKE_REQUIRED_FLAGS)
endforeach()
set(${out} "${_out_array}" PARENT_SCOPE)
endfunction()
function(asmjit_add_target target target_type)
set(single_val "")
set(multi_val SOURCES LIBRARIES CFLAGS CFLAGS_DBG CFLAGS_REL)
cmake_parse_arguments("X" "" "${single_val}" "${multi_val}" ${ARGN})
if ("${target_type}" MATCHES "^(EXECUTABLE|TEST)$")
add_executable(${target} ${X_SOURCES})
else()
add_library(${target} ${target_type} ${X_SOURCES})
endif()
set_target_properties(${target} PROPERTIES DEFINE_SYMBOL "")
target_link_libraries(${target} PRIVATE ${X_LIBRARIES})
# target_link_options was added in cmake v3.13, don't use it for now...
foreach(link_flag ${ASMJIT_SANITIZE_LFLAGS})
set_property(TARGET ${target} APPEND_STRING PROPERTY LINK_FLAGS " ${link_flag}")
endforeach()
if (${CMAKE_VERSION} VERSION_LESS "3.8.0")
set_property(TARGET ${target} PROPERTY CXX_STANDARD 11)
else()
target_compile_features(${target} PUBLIC cxx_std_11)
endif()
set_property(TARGET ${target} PROPERTY CXX_EXTENSIONS NO)
set_property(TARGET ${target} PROPERTY CXX_VISIBILITY_PRESET hidden)
target_compile_options(${target} PRIVATE ${X_CFLAGS} ${ASMJIT_SANITIZE_CFLAGS} $<$<CONFIG:Debug>:${X_CFLAGS_DBG}> $<$<NOT:$<CONFIG:Debug>>:${X_CFLAGS_REL}>)
if ("${target_type}" STREQUAL "TEST")
add_test(NAME ${target} COMMAND ${target})
endif()
endfunction()
# =============================================================================
# [AsmJit - Compiler Support]
# =============================================================================
set(ASMJIT_INCLUDE_DIRS "${ASMJIT_DIR}/src") # Include directory is the same as source dir.
set(ASMJIT_DEPS "") # AsmJit dependencies (libraries) for the linker.
set(ASMJIT_LIBS "") # Dependencies of libs/apps that want to use AsmJit.
set(ASMJIT_CFLAGS "") # Public compiler flags.
set(ASMJIT_PRIVATE_CFLAGS "") # Private compiler flags independent of build type.
set(ASMJIT_PRIVATE_CFLAGS_DBG "") # Private compiler flags used by debug builds.
set(ASMJIT_PRIVATE_CFLAGS_REL "") # Private compiler flags used by release builds.
set(ASMJIT_SANITIZE_CFLAGS "") # Compiler flags required by currently enabled sanitizers.
set(ASMJIT_SANITIZE_LFLAGS "") # Linker flags required by currently enabled sanitizers.
# We will have to keep this most likely forever as some users may still be using it.
set(ASMJIT_INCLUDE_DIR "${ASMJIT_INCLUDE_DIRS}")
if (NOT ASMJIT_NO_CUSTOM_FLAGS)
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC" OR "x${CMAKE_CXX_SIMULATE_ID}" STREQUAL "xMSVC")
list(APPEND ASMJIT_PRIVATE_CFLAGS
-MP # [+] Multi-Process Compilation.
-GF # [+] Eliminate duplicate strings.
-Zc:inline # [+] Remove unreferenced COMDAT.
-Zc:strictStrings # [+] Strict const qualification of string literals.
-Zc:threadSafeInit- # [-] Thread-safe statics.
-W4) # [+] Warning level 4.
list(APPEND ASMJIT_PRIVATE_CFLAGS_DBG
-GS) # [+] Buffer security-check.
list(APPEND ASMJIT_PRIVATE_CFLAGS_REL
-GS- # [-] Buffer security-check.
-O2 # [+] Favor speed over size.
-Oi) # [+] Generate intrinsic functions.
elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "^(GNU|Clang|AppleClang)$")
list(APPEND ASMJIT_PRIVATE_CFLAGS -Wall -Wextra -Wconversion)
list(APPEND ASMJIT_PRIVATE_CFLAGS -fno-math-errno)
list(APPEND ASMJIT_PRIVATE_CFLAGS_REL -O2)
asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS
-fno-threadsafe-statics
-fno-semantic-interposition)
# The following flags can save a few bytes in the resulting binary.
asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS_REL
-fmerge-all-constants # Merge all constants even if it violates ISO C++.
-fno-enforce-eh-specs) # Don't enforce termination if noexcept function throws.
endif()
endif()
# Support for sanitizers.
if (ASMJIT_SANITIZE)
ASMJIT_detect_sanitizers(ASMJIT_SANITIZE_CFLAGS ${ASMJIT_SANITIZE})
if (ASMJIT_SANITIZE_CFLAGS)
message("-- Enabling sanitizers: '${ASMJIT_SANITIZE_CFLAGS}'")
# Linker must receive the same flags as the compiler when it comes to sanitizers.
set(ASMJIT_SANITIZE_LFLAGS ${ASMJIT_SANITIZE_CFLAGS})
# Don't omit frame pointer if sanitizers are enabled.
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC" OR "x${CMAKE_CXX_SIMULATE_ID}" STREQUAL "xMSVC")
list(APPEND ASMJIT_SANITIZE_CFLAGS -Oy-)
else()
list(APPEND ASMJIT_SANITIZE_CFLAGS -fno-omit-frame-pointer -g)
endif()
list(APPEND ASMJIT_PRIVATE_CFLAGS ${ASMJIT_SANITIZE_CFLAGS})
list(APPEND ASMJIT_PRIVATE_LFLAGS ${ASMJIT_SANITIZE_LFLAGS})
endif()
endif()
if (NOT WIN32)
list(APPEND ASMJIT_DEPS pthread)
endif()
if ("${CMAKE_SYSTEM_NAME}" MATCHES "Linux" OR "${CMAKE_SYSTEM_NAME}" MATCHES "NetBSD")
list(APPEND ASMJIT_DEPS rt)
endif()
set(ASMJIT_LIBS ${ASMJIT_DEPS})
if (NOT ASMJIT_EMBED)
list(INSERT ASMJIT_LIBS 0 asmjit)
endif()
if (ASMJIT_EMBED)
set(ASMJIT_TARGET_TYPE "EMBED")
elseif (ASMJIT_STATIC)
set(ASMJIT_TARGET_TYPE "STATIC")
else()
set(ASMJIT_TARGET_TYPE "SHARED")
endif()
foreach(build_option ASMJIT_STATIC
ASMJIT_BUILD_X86
ASMJIT_NO_DEPRECATED
ASMJIT_NO_JIT
ASMJIT_NO_LOGGING
ASMJIT_NO_BUILDER
ASMJIT_NO_COMPILER
ASMJIT_NO_TEXT
ASMJIT_NO_VALIDATION
ASMJIT_NO_INTROSPECTION)
if (${build_option})
List(APPEND ASMJIT_CFLAGS "-D${build_option}")
List(APPEND ASMJIT_PRIVATE_CFLAGS "-D${build_option}")
endif()
endforeach()
# =============================================================================
# [AsmJit - Linker Support]
# =============================================================================
if (WIN32)
if(CMAKE_LINKER MATCHES "link\\.exe" OR CMAKE_LINKER MATCHES "lld-link\\.exe")
set(ASMJIT_LINKER_SUPPORTS_NATVIS TRUE)
endif()
endif()
# =============================================================================
# [AsmJit - Source]
# =============================================================================
set(ASMJIT_SRC_LIST
asmjit/asmjit.h
asmjit/asmjit-scope-begin.h
asmjit/asmjit-scope-end.h
asmjit/core.h
asmjit/core/api-build_p.h
asmjit/core/api-config.h
asmjit/core/archtraits.cpp
asmjit/core/archtraits.h
asmjit/core/archcommons.h
asmjit/core/assembler.cpp
asmjit/core/assembler.h
asmjit/core/builder.cpp
asmjit/core/builder.h
asmjit/core/codebuffer.h
asmjit/core/codeholder.cpp
asmjit/core/codeholder.h
asmjit/core/codewriter.cpp
asmjit/core/codewriter_p.h
asmjit/core/compiler.cpp
asmjit/core/compiler.h
asmjit/core/compilerdefs.h
asmjit/core/constpool.cpp
asmjit/core/constpool.h
asmjit/core/cpuinfo.cpp
asmjit/core/cpuinfo.h
asmjit/core/datatypes.h
asmjit/core/emithelper.cpp
asmjit/core/emithelper_p.h
asmjit/core/emitter.cpp
asmjit/core/emitter.h
asmjit/core/emitterutils.cpp
asmjit/core/emitterutils_p.h
asmjit/core/environment.cpp
asmjit/core/environment.h
asmjit/core/errorhandler.cpp
asmjit/core/errorhandler.h
asmjit/core/features.h
asmjit/core/formatter.cpp
asmjit/core/formatter.h
asmjit/core/func.cpp
asmjit/core/func.h
asmjit/core/funcargscontext.cpp
asmjit/core/funcargscontext_p.h
asmjit/core/globals.cpp
asmjit/core/globals.h
asmjit/core/inst.cpp
asmjit/core/inst.h
asmjit/core/jitallocator.cpp
asmjit/core/jitallocator.h
asmjit/core/jitruntime.cpp
asmjit/core/jitruntime.h
asmjit/core/logger.cpp
asmjit/core/logger.h
asmjit/core/misc_p.h
asmjit/core/operand.cpp
asmjit/core/operand.h
asmjit/core/osutils.cpp
asmjit/core/osutils.h
asmjit/core/raassignment_p.h
asmjit/core/rabuilders_p.h
asmjit/core/radefs_p.h
asmjit/core/ralocal.cpp
asmjit/core/ralocal_p.h
asmjit/core/rapass.cpp
asmjit/core/rapass_p.h
asmjit/core/rastack.cpp
asmjit/core/rastack_p.h
asmjit/core/string.cpp
asmjit/core/string.h
asmjit/core/support.cpp
asmjit/core/support.h
asmjit/core/target.cpp
asmjit/core/target.h
asmjit/core/type.cpp
asmjit/core/type.h
asmjit/core/virtmem.cpp
asmjit/core/virtmem.h
asmjit/core/zone.cpp
asmjit/core/zone.h
asmjit/core/zonehash.cpp
asmjit/core/zonehash.h
asmjit/core/zonelist.cpp
asmjit/core/zonelist.h
asmjit/core/zonestack.cpp
asmjit/core/zonestack.h
asmjit/core/zonestring.h
asmjit/core/zonetree.cpp
asmjit/core/zonetree.h
asmjit/core/zonevector.cpp
asmjit/core/zonevector.h
asmjit/x86.h
asmjit/x86/x86archtraits_p.h
asmjit/x86/x86assembler.cpp
asmjit/x86/x86assembler.h
asmjit/x86/x86builder.cpp
asmjit/x86/x86builder.h
asmjit/x86/x86compiler.cpp
asmjit/x86/x86compiler.h
asmjit/x86/x86emithelper.cpp
asmjit/x86/x86emithelper_p.h
asmjit/x86/x86emitter.h
asmjit/x86/x86features.cpp
asmjit/x86/x86features.h
asmjit/x86/x86formatter.cpp
asmjit/x86/x86formatter_p.h
asmjit/x86/x86func.cpp
asmjit/x86/x86func_p.h
asmjit/x86/x86globals.h
asmjit/x86/x86instdb.cpp
asmjit/x86/x86instdb.h
asmjit/x86/x86instdb_p.h
asmjit/x86/x86instapi.cpp
asmjit/x86/x86instapi_p.h
asmjit/x86/x86operand.cpp
asmjit/x86/x86operand.h
asmjit/x86/x86rapass.cpp
asmjit/x86/x86rapass_p.h
)
if (MSVC AND NOT ASMJIT_NO_NATVIS)
list(APPEND ASMJIT_SRC_LIST asmjit.natvis)
endif()
set(ASMJIT_SRC "")
foreach(src_file ${ASMJIT_SRC_LIST})
set(src_file "${ASMJIT_DIR}/src/${src_file}")
list(APPEND ASMJIT_SRC ${src_file})
if ("${src_file}" MATCHES "\\.natvis")
if (ASMJIT_LINKER_SUPPORTS_NATVIS)
list(APPEND ASMJIT_PRIVATE_LFLAGS "-natvis:${src_file}")
endif()
endif()
endforeach()
if (NOT ${CMAKE_VERSION} VERSION_LESS "3.8.0")
source_group(TREE "${ASMJIT_DIR}" FILES ${ASMJIT_SRC})
endif()
# =============================================================================
# [AsmJit - Summary]
# =============================================================================
message("** AsmJit Summary **")
message(" ASMJIT_DIR=${ASMJIT_DIR}")
message(" ASMJIT_TEST=${ASMJIT_TEST}")
message(" ASMJIT_TARGET_TYPE=${ASMJIT_TARGET_TYPE}")
message(" ASMJIT_DEPS=${ASMJIT_DEPS}")
message(" ASMJIT_LIBS=${ASMJIT_LIBS}")
message(" ASMJIT_CFLAGS=${ASMJIT_CFLAGS}")
message(" ASMJIT_PRIVATE_CFLAGS=${ASMJIT_PRIVATE_CFLAGS}")
message(" ASMJIT_PRIVATE_CFLAGS_DBG=${ASMJIT_PRIVATE_CFLAGS_DBG}")
message(" ASMJIT_PRIVATE_CFLAGS_REL=${ASMJIT_PRIVATE_CFLAGS_REL}")
# =============================================================================
# [AsmJit - Targets]
# =============================================================================
if (NOT ASMJIT_EMBED)
# Add AsmJit target.
asmjit_add_target(asmjit "${ASMJIT_TARGET_TYPE}"
SOURCES ${ASMJIT_SRC}
LIBRARIES ${ASMJIT_DEPS}
CFLAGS ${ASMJIT_PRIVATE_CFLAGS}
CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG}
CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL})
target_compile_options(asmjit INTERFACE ${ASMJIT_CFLAGS})
target_include_directories(asmjit BEFORE INTERFACE
$<BUILD_INTERFACE:${ASMJIT_INCLUDE_DIRS}>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
# Add asmjit::asmjit alias.
add_library(asmjit::asmjit ALIAS asmjit)
# TODO: [CMAKE] Deprecated alias - we use projectname::libraryname convention now.
add_library(AsmJit::AsmJit ALIAS asmjit)
# Add AsmJit install instructions (library and public headers).
if (NOT ASMJIT_NO_INSTALL)
install(TARGETS asmjit
EXPORT asmjit-config
RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
install(EXPORT asmjit-config
NAMESPACE asmjit::
DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/asmjit")
foreach(_src_file ${ASMJIT_SRC_LIST})
if ("${_src_file}" MATCHES "\\.h$" AND NOT "${_src_file}" MATCHES "_p\\.h$")
get_filename_component(_src_dir ${_src_file} PATH)
install(FILES "${ASMJIT_DIR}/src/${_src_file}" DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/${_src_dir}")
endif()
endforeach()
endif()
# Add AsmJit tests.
if (ASMJIT_TEST)
enable_testing()
# Special target that always uses embedded AsmJit.
asmjit_add_target(asmjit_test_unit TEST
SOURCES ${ASMJIT_SRC}
test/asmjit_test_unit.cpp
test/broken.cpp
test/broken.h
LIBRARIES ${ASMJIT_DEPS}
CFLAGS ${ASMJIT_PRIVATE_CFLAGS}
-DASMJIT_TEST
-DASMJIT_STATIC
CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG}
CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL})
target_include_directories(asmjit_test_unit BEFORE PRIVATE ${ASMJIT_INCLUDE_DIRS})
foreach(_target asmjit_test_opcode
asmjit_test_x86_asm
asmjit_test_x86_sections)
asmjit_add_target(${_target} TEST
SOURCES test/${_target}.cpp
LIBRARIES asmjit::asmjit
CFLAGS ${ASMJIT_PRIVATE_CFLAGS}
CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG}
CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL})
endforeach()
if (NOT ASMJIT_NO_INTROSPECTION)
asmjit_add_target(asmjit_test_x86_instinfo TEST
SOURCES test/asmjit_test_x86_instinfo.cpp
LIBRARIES asmjit::asmjit
CFLAGS ${ASMJIT_PRIVATE_CFLAGS}
CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG}
CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL})
endif()
if (NOT (ASMJIT_NO_BUILDER OR ASMJIT_NO_COMPILER))
# Vectorcall tests and XMM tests require at least SSE2 in 32-bit mode (in 64-bit mode it's implicit).
set(sse2_flags "")
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC" OR "x${CMAKE_CXX_SIMULATE_ID}" STREQUAL "xMSVC")
asmjit_detect_cflags(sse2_flags "-arch:SSE2")
else()
asmjit_detect_cflags(sse2_flags "-msse2")
endif()
asmjit_add_target(asmjit_test_compiler TEST
SOURCES test/asmjit_test_compiler.cpp
test/asmjit_test_compiler_x86.cpp
test/asmjit_test_compiler.h
LIBRARIES asmjit::asmjit
CFLAGS ${ASMJIT_PRIVATE_CFLAGS} ${sse2_flags}
CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG}
CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL})
endif()
foreach(_target asmjit_bench_x86)
asmjit_add_target(${_target} EXECUTABLE
SOURCES test/${_target}.cpp
LIBRARIES asmjit::asmjit
CFLAGS ${ASMJIT_PRIVATE_CFLAGS}
CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG}
CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL})
endforeach()
endif()
endif()
cmake_policy(POP)

17
deps/asmjit/LICENSE.md vendored Normal file
View File

@ -0,0 +1,17 @@
Copyright (c) 2008-2020 The AsmJit Authors
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

69
deps/asmjit/README.md vendored Normal file
View File

@ -0,0 +1,69 @@
AsmJit
------
AsmJit is a lightweight library for machine code generation, written in C++.
* [Official Home Page (asmjit.com)](https://asmjit.com)
* [Official Repository (asmjit/asmjit)](https://github.com/asmjit/asmjit)
* [Public Chat Channel](https://gitter.im/asmjit/asmjit)
* [Zlib License](./LICENSE.md)
See [asmjit.com](https://asmjit.com) page for more details, examples, and documentation.
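For quick orientation, the sketch below shows how the library is typically driven: emit a tiny function with `x86::Assembler` and run it through `JitRuntime`. This is a minimal illustration based on the public API as vendored here, not an excerpt from the upstream documentation; error handling is abbreviated.

```c++
// Minimal JIT sketch: assemble `mov eax, 1; ret` and call it.
#include <asmjit/x86.h>
#include <cstdio>

typedef int (*Func)(void);                      // Signature of the generated function.

int main() {
  asmjit::JitRuntime rt;                        // Owns and maps executable memory.
  asmjit::CodeHolder code;                      // Holds code buffers and relocations.
  code.init(rt.environment());                  // Target the host environment.

  asmjit::x86::Assembler a(&code);              // Emits machine code into `code`.
  a.mov(asmjit::x86::eax, 1);                   // Return value.
  a.ret();

  Func fn;
  if (rt.add(&fn, &code) != asmjit::kErrorOk)   // Relocate and copy to executable memory.
    return 1;

  std::printf("%d\n", fn());                    // Prints "1".
  rt.release(fn);                               // Free the generated code.
  return 0;
}
```

Real code would usually also attach a `Logger` and an `ErrorHandler` to the `CodeHolder`; see the documentation links below.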
Documentation
-------------
* [Documentation Index](https://asmjit.com/doc/index.html)
* [Build Instructions](https://asmjit.com/doc/group__asmjit__build.html)
Breaking Changes
----------------
Breaking the API is sometimes inevitable. What should you do?
* See [Breaking Changes Guide](https://asmjit.com/doc/group__asmjit__breaking__changes.html), which is now part of AsmJit documentation.
* See the asmjit tests; they always compile and provide implementations of many use cases:
* [asmjit_test_x86_asm.cpp](./test/asmjit_test_x86_asm.cpp) - Tests that demonstrate the purpose of emitters.
* [asmjit_test_x86_sections.cpp](./test/asmjit_test_x86_sections.cpp) - Multiple sections test.
* [asmjit_test_compiler_x86.cpp](./test/asmjit_test_compiler_x86.cpp) - Tests targeting AsmJit's Compiler (x86/x64).
* Visit our [Official Chat](https://gitter.im/asmjit/asmjit) if you need quick help.
Project Organization
--------------------
* **`/`** - Project root.
* **src** - Source code.
* **asmjit** - Source code and headers (always point your include path here).
* **core** - Core API, backend independent except relocations.
* **arm** - ARM specific API, used only by ARM and AArch64 backends.
* **x86** - X86 specific API, used only by X86 and X64 backends.
* **test** - Unit and integration tests (don't embed in your project).
* **tools** - Tools used for configuring, documenting, and generating files.
TODO
----
* [ ] Core:
* [ ] Add support for user external buffers in CodeBuffer / CodeHolder.
* [ ] Register allocator doesn't understand register pairs, affected instructions:
* [ ] v4fmaddps, v4fmaddss, v4fnmaddps, v4fnmaddss
* [ ] vp4dpwssd, vp4dpwssds
* [ ] vp2intersectd, vp2intersectq
* [ ] Ports:
* [ ] ARM/Thumb/AArch64 support.
Support
-------
* The AsmJit project has both community and commercial support; see [AsmJit's Support Page](https://asmjit.com/support.html)
* You can help development and maintenance through Petr Kobalicek's [GitHub Sponsors profile](https://github.com/sponsors/kobalicek)
Notable Donors List:
* [ZehMatt](https://github.com/ZehMatt)
Authors & Maintainers
---------------------
* Petr Kobalicek <kobalicek.petr@gmail.com>

201
deps/asmjit/src/asmjit.natvis vendored Normal file
View File

@ -0,0 +1,201 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- asmjit visualizer for Visual Studio (natvis) -->
<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
<Type Name="asmjit::String">
<Intrinsic Name="isSmall" Expression="(_type &lt; 0x1F)"/>
<DisplayString Condition="isSmall()">{_small.data, s8}</DisplayString>
<DisplayString Condition="!isSmall()">{_large.data, s8}</DisplayString>
<Expand HideRawView="true">
<Synthetic Name="_type">
<DisplayString Condition="(_type &lt; 0x1F)">Small</DisplayString>
<DisplayString Condition="(_type == 0x1F)">Large</DisplayString>
<DisplayString Condition="(_type &gt; 0x1F)">External</DisplayString>
</Synthetic>
<Item Name="_size" Condition="isSmall()" ExcludeView="simple">(int)_small.type, d</Item>
<Item Name="_size" Condition="!isSmall()" ExcludeView="simple">_large.size, d</Item>
<Item Name="_capacity" Condition="isSmall()" ExcludeView="simple">asmjit::String::kSSOCapacity, d</Item>
<Item Name="_capacity" Condition="!isSmall()" ExcludeView="simple">_large.capacity, d</Item>
<Item Name="_data" Condition="isSmall()" ExcludeView="simple">_small.data, s8</Item>
<Item Name="_data" Condition="!isSmall()" ExcludeView="simple">_large.data, s8</Item>
</Expand>
</Type>
<Type Name="asmjit::ZoneVector&lt;*&gt;">
<DisplayString>{{ [size={_size, d} capacity={_capacity, d}] }}</DisplayString>
<Expand>
<Item Name="_size" ExcludeView="simple">_size, d</Item>
<Item Name="_capacity" ExcludeView="simple">_capacity, d</Item>
<ArrayItems>
<Size>_size</Size>
<ValuePointer>(($T1*)_data)</ValuePointer>
</ArrayItems>
</Expand>
</Type>
<Type Name="asmjit::Operand_">
<Intrinsic Name="opType" Expression="(unsigned int)(_signature &amp; 0x7)" />
<Intrinsic Name="opSize" Expression="(_signature &gt;&gt; 24) &amp; 0xFF" />
<Intrinsic Name="regType" Expression="(_signature &gt;&gt; 3) &amp; 0x1F" />
<Intrinsic Name="regGroup" Expression="(_signature &gt;&gt; 8) &amp; 0xF" />
<Intrinsic Name="memBaseType" Expression="(_signature &gt;&gt; 3) &amp; 0x1F" />
<Intrinsic Name="memIndexType" Expression="(_signature &gt;&gt; 8) &amp; 0x1F" />
<Intrinsic Name="memAddrType" Expression="(_signature &gt;&gt; 13) &amp; 0x3" />
<Intrinsic Name="memRegHome" Expression="(_signature &gt;&gt; 15) &amp; 0x1" />
<Intrinsic Name="memBaseId" Expression="_baseId" />
<Intrinsic Name="memIndexId" Expression="_data[0]" />
<Intrinsic Name="memOffset32b" Expression="(__int64)int(_data[1])" />
<Intrinsic Name="memOffset64b" Expression="(__int64) ((unsigned __int64)_baseId &lt;&lt; 32) | ((unsigned __int64)_data[1])" />
<Intrinsic Name="memOffset" Expression="memBaseType() != 0 ? memOffset32b() : memOffset64b()" />
<Intrinsic Name="immValue" Expression="((__int64)_data[1] &lt;&lt; 32) | (__int64)_data[0]" />
<DisplayString Condition="opType() == 0">[None]</DisplayString>
<DisplayString Condition="opType() == 1">[Reg] {{ id={_baseId, d} group={regGroup(), d} type={regType(), d} size={opSize(), d} }}</DisplayString>
<DisplayString Condition="opType() == 2">[Mem] {{ baseId={memBaseId(), d} indexId={memIndexId(), d} offset={(__int64)memOffset(), d} }}</DisplayString>
<DisplayString Condition="opType() == 3">[Imm] {{ val={immValue(), d} hex={immValue(), X} }}</DisplayString>
<DisplayString Condition="opType() == 4">[Label] {{ id={_baseId} }}</DisplayString>
<DisplayString Condition="opType() &gt; 4">[Unknown]</DisplayString>
<Expand HideRawView="true">
<Item Name="_signature">_signature, X</Item>
<Item Name="_signature.any.type">(asmjit::Operand_::OpType)opType()</Item>
<Item Name="_signature.any.size">opSize(), d</Item>
<Item Name="_signature.reg.type" Condition="opType() == 1">(asmjit::BaseReg::RegType)regType()</Item>
<Item Name="_signature.reg.group" Condition="opType() == 1">(asmjit::BaseReg::RegGroup)regGroup()</Item>
<Item Name="_signature.mem.baseType" Condition="opType() == 2">(asmjit::BaseReg::RegType)memBaseType()</Item>
<Item Name="_signature.mem.indexType" Condition="opType() == 2">(asmjit::BaseReg::RegType)memIndexType()</Item>
<Item Name="_signature.mem.addrType" Condition="opType() == 2">(asmjit::BaseMem::AddrType)memAddrType()</Item>
<Item Name="_signature.mem.regHome" Condition="opType() == 2">(bool)memRegHome()</Item>
<Item Name="_baseId">_baseId</Item>
<Item Name="_data[0]" Condition="opType() != 2 &amp;&amp; opType() != 3">_data[0]</Item>
<Item Name="_data[1]" Condition="opType() != 2 &amp;&amp; opType() != 3">_data[1]</Item>
<Item Name="_data[IndexId]" Condition="opType() == 2">_data[0]</Item>
<Item Name="_data[OffsetLo]" Condition="opType() == 2">_data[1]</Item>
<Item Name="_data[ImmHi]" Condition="opType() == 3">_data[0]</Item>
<Item Name="_data[ImmLo]" Condition="opType() == 3">_data[1]</Item>
</Expand>
</Type>
<Type Name="asmjit::FuncValue">
<Intrinsic Name="isReg" Expression="(_data &amp; asmjit::FuncValue::kFlagIsReg) != 0" />
<Intrinsic Name="isStack" Expression="(_data &amp; asmjit::FuncValue::kFlagIsStack) != 0" />
<Intrinsic Name="isIndirect" Expression="(_data &amp; asmjit::FuncValue::kFlagIsIndirect) != 0" />
<Intrinsic Name="isDone" Expression="(_data &amp; asmjit::FuncValue::kFlagIsDone) != 0" />
<Intrinsic Name="typeId" Expression="((_data &amp; asmjit::FuncValue::kTypeIdMask) &gt;&gt; asmjit::FuncValue::kTypeIdShift)" />
<Intrinsic Name="regId" Expression="((_data &amp; asmjit::FuncValue::kRegIdMask) &gt;&gt; asmjit::FuncValue::kRegIdShift)" />
<Intrinsic Name="regType" Expression="((_data &amp; asmjit::FuncValue::kRegTypeMask) &gt;&gt; asmjit::FuncValue::kRegTypeShift)" />
<Intrinsic Name="stackOffset" Expression="((_data &amp; asmjit::FuncValue::kStackOffsetMask) &gt;&gt; asmjit::FuncValue::kStackOffsetShift)" />
<DisplayString Condition="isReg()">[RegValue {{ regType={regType()} indirect={isIndirect()} done={isDone()} }}]</DisplayString>
<DisplayString Condition="isStack()">[StackValue {{ indirect={isIndirect()} done={isDone()} }}]</DisplayString>
<DisplayString Condition="!isReg() &amp;&amp; !isStack()">[Unknown]</DisplayString>
<Expand HideRawView="true">
<Item Name="data">_data</Item>
<Item Name="typeId">(asmjit::Type::Id)(typeId())</Item>
<Item Name="regType" Condition="isReg()">(asmjit::BaseReg::RegType)regType()</Item>
<Item Name="regId" Condition="isReg()">regId()</Item>
<Item Name="stackOffset" Condition="isStack()">stackOffset()</Item>
</Expand>
</Type>
<Type Name="asmjit::BaseNode">
<Intrinsic Name="nodeType" Expression="_any._nodeType" />
<Intrinsic Name="isInst" Expression="nodeType() == asmjit::BaseNode::kNodeInst"></Intrinsic>
<Intrinsic Name="isSection" Expression="nodeType() == asmjit::BaseNode::kNodeSection"></Intrinsic>
<Intrinsic Name="isLabel" Expression="nodeType() == asmjit::BaseNode::kNodeLabel"></Intrinsic>
<Intrinsic Name="isAlign" Expression="nodeType() == asmjit::BaseNode::kNodeAlign"></Intrinsic>
<Intrinsic Name="isEmbedData" Expression="nodeType() == asmjit::BaseNode::kNodeEmbedData"></Intrinsic>
<Intrinsic Name="isEmbedLabel" Expression="nodeType() == asmjit::BaseNode::kNodeEmbedLabel"></Intrinsic>
<Intrinsic Name="isEmbedLabelDelta" Expression="nodeType() == asmjit::BaseNode::kNodeEmbedLabelDelta"></Intrinsic>
<Intrinsic Name="isConstPool" Expression="nodeType() == asmjit::BaseNode::kNodeConstPool"></Intrinsic>
<Intrinsic Name="isComment" Expression="nodeType() == asmjit::BaseNode::kNodeComment"></Intrinsic>
<Intrinsic Name="isSentinel" Expression="nodeType() == asmjit::BaseNode::kNodeSentinel"></Intrinsic>
<Intrinsic Name="isJump" Expression="nodeType() == asmjit::BaseNode::kNodeJump"></Intrinsic>
<Intrinsic Name="isFunc" Expression="nodeType() == asmjit::BaseNode::kNodeFunc"></Intrinsic>
<Intrinsic Name="isFuncRet" Expression="nodeType() == asmjit::BaseNode::kNodeFuncRet"></Intrinsic>
<Intrinsic Name="isInvoke" Expression="nodeType() == asmjit::BaseNode::kNodeInvoke"></Intrinsic>
<Intrinsic Name="actsAsInst" Expression="isInst() || isJump() || isFunc() || isFuncRet() || isInvoke()" />
<Intrinsic Name="actsAsLabel" Expression="isLabel() || isFunc()" />
<DisplayString Condition="isInst()">[InstNode]</DisplayString>
<DisplayString Condition="isSentinel()">[SectionNode]</DisplayString>
<DisplayString Condition="isLabel()">[LabelNode]</DisplayString>
<DisplayString Condition="isAlign()">[AlignNode]</DisplayString>
<DisplayString Condition="isEmbedData()">[EmbedDataNode]</DisplayString>
<DisplayString Condition="isEmbedLabel()">[EmbedLabelNode]</DisplayString>
<DisplayString Condition="isEmbedLabelDelta()">[EmbedLabelDeltaNode]</DisplayString>
<DisplayString Condition="isConstPool()">[ConstPoolNode]</DisplayString>
<DisplayString Condition="isComment()">[CommentNode]</DisplayString>
<DisplayString Condition="isSentinel()">[SentinelNode]</DisplayString>
<DisplayString Condition="isJump()">[JumpNode]</DisplayString>
<DisplayString Condition="isFunc()">[FuncNode]</DisplayString>
<DisplayString Condition="isFuncRet()">[FuncRetNode]</DisplayString>
<DisplayString Condition="isInvoke()">[InvokeNode]</DisplayString>
<DisplayString Condition="nodeType() == 0 || nodeType() &gt; 18">[UnknownNode {nodeType(), d}]</DisplayString>
<Expand HideRawView="true">
<Item Name="prev">_prev</Item>
<Item Name="next">_next</Item>
<Item Name="nodeType">(asmjit::BaseNode::NodeType)_any._nodeType</Item>
<Item Name="nodeFlags">(asmjit::BaseNode::Flags)_any._nodeFlags</Item>
<Item Name="position">_position</Item>
<Item Name="userData.u64">_userDataU64</Item>
<Item Name="userData.ptr">_userDataPtr</Item>
<Item Name="passData">_passData</Item>
<Item Name="inlineComment">_inlineComment, s8</Item>
<Item Name="baseInst" Condition="actsAsInst()">((asmjit::InstNode*)this)-&gt;_baseInst</Item>
<Item Name="opCount" Condition="actsAsInst()">_inst._opCount</Item>
<Item Name="opCapacity" Condition="actsAsInst()">_inst._opCapacity</Item>
<Item Name="opArray" Condition="actsAsInst()">((asmjit::InstNode*)this)-&gt;_opArray, [_inst._opCount]</Item>
<Item Name="sectionId" Condition="isSection()">((asmjit::SectionNode*)this)-&gt;_id</Item>
<Item Name="nextSection" Condition="isSection()">((asmjit::SectionNode*)this)-&gt;_nextSection</Item>
<Item Name="labelId" Condition="isLabel()">((asmjit::LabelNode*)this)-&gt;_id</Item>
<Item Name="alignMode" Condition="isAlign()">((asmjit::AlignNode*)this)-&gt;_alignMode</Item>
<Item Name="alignment" Condition="isAlign()">((asmjit::AlignNode*)this)-&gt;_alignment</Item>
<Item Name="typeId" Condition="isEmbedData()">_embed._typeId, d</Item>
<Item Name="typeSize" Condition="isEmbedData()">_embed._typeSize, d</Item>
<Item Name="itemCount" Condition="isEmbedData()">((asmjit::EmbedDataNode*)this)-&gt;_itemCount</Item>
<Item Name="repeatCount" Condition="isEmbedData()">((asmjit::EmbedDataNode*)this)-&gt;_repeatCount</Item>
<Item Name="inlineData" Condition="isEmbedData()">((asmjit::EmbedDataNode*)this)-&gt;_inlineData</Item>
<Item Name="externalData" Condition="isEmbedData()">((asmjit::EmbedDataNode*)this)-&gt;_externalData</Item>
<Item Name="labelId" Condition="isEmbedLabel()">((asmjit::EmbedLabelNode*)this)-&gt;_id</Item>
<Item Name="labelId" Condition="isEmbedLabelDelta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_id</Item>
<Item Name="baseId" Condition="isEmbedLabelDelta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_baseId</Item>
<Item Name="dataSize" Condition="isEmbedLabelDelta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_dataSize</Item>
<Item Name="constPool" Condition="isConstPool()">((asmjit::ConstPoolNode*)this)-&gt;_constPool</Item>
<Item Name="sentinel.sentinelType" Condition="isSentinel()">(asmjit::SentinelNode::SentinelType)_sentinel._sentinelType</Item>
<Item Name="annotation" Condition="isJump()">((asmjit::JumpNode*)this)-&gt;_annotation</Item>
<Item Name="funcDetail" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_funcDetail</Item>
<Item Name="frame" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_frame</Item>
<Item Name="exitNode" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_exitNode</Item>
<Item Name="end" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_end</Item>
<Item Name="args" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_args, [((asmjit::FuncNode*)this)-&gt;_funcDetail._argCount]</Item>
<Item Name="funcDetail" Condition="isInvoke()">((asmjit::InvokeNode*)this)-&gt;_funcDetail</Item>
<Item Name="rets" Condition="isInvoke()">((asmjit::InvokeNode*)this)-&gt;_rets, [((asmjit::InvokeNode*)this)-&gt;_funcDetail._retCount]</Item>
<Item Name="args" Condition="isInvoke()">((asmjit::InvokeNode*)this)-&gt;_args, [((asmjit::InvokeNode*)this)-&gt;_funcDetail._argCount]</Item>
</Expand>
</Type>
</AutoVisualizer>

View File

@ -0,0 +1,35 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifdef _WIN32
#pragma push_macro("min")
#pragma push_macro("max")
#ifdef min
#undef min
#endif
#ifdef max
#undef max
#endif
#endif

View File

@ -0,0 +1,27 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifdef _WIN32
#pragma pop_macro("min")
#pragma pop_macro("max")
#endif

37
deps/asmjit/src/asmjit/asmjit.h vendored Normal file
View File

@ -0,0 +1,37 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_ASMJIT_H_INCLUDED
#define ASMJIT_ASMJIT_H_INCLUDED
#include "./core.h"
#ifdef ASMJIT_BUILD_X86
#include "./x86.h"
#endif
#ifdef ASMJIT_BUILD_ARM
#include "./arm.h"
#endif
#endif // ASMJIT_ASMJIT_H_INCLUDED
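asmjit.h is the umbrella header a consumer includes to pull in the whole library. As a minimal, hedged sketch of how this vendored copy would typically be driven (not part of the committed sources; it assumes the X86 backend is enabled and uses asmjit's public JitRuntime, CodeHolder and x86::Assembler types):
// Minimal sketch, assuming asmjit's public JIT API as vendored here.
#include <asmjit/asmjit.h>
#include <cstdio>

typedef int (*Func)(void);

int main() {
  asmjit::JitRuntime rt;               // Owns executable memory for generated code.
  asmjit::CodeHolder code;
  code.init(rt.environment());         // Match the host environment.

  asmjit::x86::Assembler a(&code);     // Emits directly into the CodeHolder's .text section.
  a.mov(asmjit::x86::eax, 42);
  a.ret();

  Func fn;
  if (rt.add(&fn, &code) != asmjit::kErrorOk)  // Relocate and make the code executable.
    return 1;
  std::printf("%d\n", fn());           // Prints 42.
  rt.release(fn);
  return 0;
}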

2063
deps/asmjit/src/asmjit/core.h vendored Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1,77 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_API_BUILD_P_H_INCLUDED
#define ASMJIT_CORE_API_BUILD_P_H_INCLUDED
#define ASMJIT_EXPORTS
// Only turn-off these warnings when building asmjit itself.
#ifdef _MSC_VER
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#endif
// Dependencies only required for asmjit build, but never exposed through public headers.
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#endif
// ============================================================================
// [asmjit::Build - Globals - Build-Only]
// ============================================================================
#include "./api-config.h"
#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__) && !defined(__clang__)
#define ASMJIT_FAVOR_SIZE __attribute__((__optimize__("Os")))
#define ASMJIT_FAVOR_SPEED __attribute__((__optimize__("O3")))
#elif ASMJIT_CXX_HAS_ATTRIBUTE(__minsize__, 0)
#define ASMJIT_FAVOR_SIZE __attribute__((__minsize__))
#define ASMJIT_FAVOR_SPEED
#else
#define ASMJIT_FAVOR_SIZE
#define ASMJIT_FAVOR_SPEED
#endif
// Make sure '#ifdef'ed unit tests are properly highlighted in IDE.
#if !defined(ASMJIT_TEST) && defined(__INTELLISENSE__)
#define ASMJIT_TEST
#endif
// Include a unit testing package if this is a `asmjit_test_unit` build.
#if defined(ASMJIT_TEST)
#include "../../../test/broken.h"
#endif
#endif // ASMJIT_CORE_API_BUILD_P_H_INCLUDED
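ASMJIT_FAVOR_SIZE and ASMJIT_FAVOR_SPEED are applied to functions inside asmjit's own translation units (archtraits.cpp later in this commit uses ASMJIT_FAVOR_SIZE). A hedged sketch of that usage with illustrative functions, not taken from this commit:
// Illustrative only: how asmjit's .cpp files apply the build-only attributes above.
#include "../core/api-build_p.h"

ASMJIT_FAVOR_SIZE static int rarelyHotHelper(int x) {
  // Compiled with -Os-like settings on GCC release builds; expands to nothing elsewhere.
  return x * 2 + 1;
}

ASMJIT_FAVOR_SPEED static int hotLoop(const int* data, int n) {
  int sum = 0;
  for (int i = 0; i < n; i++)
    sum += data[i];
  return sum;
}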

552
deps/asmjit/src/asmjit/core/api-config.h vendored Normal file
View File

@ -0,0 +1,552 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_API_CONFIG_H_INCLUDED
#define ASMJIT_CORE_API_CONFIG_H_INCLUDED
// ============================================================================
// [asmjit::Version]
// ============================================================================
//! \addtogroup asmjit_core
//! \{
//! AsmJit library version in `(Major << 16) | (Minor << 8) | (Patch)` format.
#define ASMJIT_LIBRARY_VERSION 0x010400 /* 1.4.0 */
//! \}
// ============================================================================
// [asmjit::Build - Documentation]
// ============================================================================
// NOTE: Doxygen cannot document macros that are not defined, that's why we have
// to define them and then undefine them, so it won't use the macros with its
// own preprocessor.
#ifdef _DOXYGEN
namespace asmjit {
//! \addtogroup asmjit_build
//! \{
//! Asmjit is embedded, implies \ref ASMJIT_STATIC.
#define ASMJIT_EMBED
//! Enables static-library build.
#define ASMJIT_STATIC
//! Defined when AsmJit's build configuration is 'Debug'.
//!
//! \note Can be defined explicitly to bypass autodetection.
#define ASMJIT_BUILD_DEBUG
//! Defined when AsmJit's build configuration is 'Release'.
//!
//! \note Can be defined explicitly to bypass autodetection.
#define ASMJIT_BUILD_RELEASE
//! Defined to build X86/X64 backend.
#define ASMJIT_BUILD_X86
//! Defined to build host backend autodetected at compile-time.
#define ASMJIT_BUILD_HOST
//! Disables deprecated API at compile time.
#define ASMJIT_NO_DEPRECATED
//! Disable non-host architectures entirely.
#define ASMJIT_NO_FOREIGN
//! Disables \ref asmjit_builder functionality completely.
#define ASMJIT_NO_BUILDER
//! Disables \ref asmjit_compiler functionality completely.
#define ASMJIT_NO_COMPILER
//! Disables JIT memory management and \ref JitRuntime.
#define ASMJIT_NO_JIT
//! Disables \ref Logger and \ref Formatter.
#define ASMJIT_NO_LOGGING
//! Disables everything that contains text.
#define ASMJIT_NO_TEXT
//! Disables instruction validation API.
#define ASMJIT_NO_VALIDATION
//! Disables instruction introspection API.
#define ASMJIT_NO_INTROSPECTION
// Avoid doxygen preprocessor using feature-selection definitions.
#undef ASMJIT_NO_BUILDER
#undef ASMJIT_NO_COMPILER
#undef ASMJIT_NO_JIT
#undef ASMJIT_NO_LOGGING
#undef ASMJIT_NO_TEXT
#undef ASMJIT_NO_VALIDATION
#undef ASMJIT_NO_INTROSPECTION
//! \}
} // {asmjit}
#endif // _DOXYGEN
// Enable all features at IDE level, so it's properly highlighted and indexed.
#ifdef __INTELLISENSE__
#ifndef ASMJIT_BUILD_X86
#define ASMJIT_BUILD_X86
#endif
#endif
// ============================================================================
// [asmjit::Dependencies]
// ============================================================================
// We really want std-types as globals.
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iterator>
#include <limits>
#include <new>
#include <type_traits>
#include <utility>
#if !defined(_WIN32) && !defined(__EMSCRIPTEN__)
#include <pthread.h>
#endif
// ============================================================================
// [asmjit::Options]
// ============================================================================
// ASMJIT_NO_BUILDER implies ASMJIT_NO_COMPILER.
#if defined(ASMJIT_NO_BUILDER) && !defined(ASMJIT_NO_COMPILER)
#define ASMJIT_NO_COMPILER
#endif
// Prevent compile-time errors caused by misconfiguration.
#if defined(ASMJIT_NO_TEXT) && !defined(ASMJIT_NO_LOGGING)
#pragma "ASMJIT_NO_TEXT can only be defined when ASMJIT_NO_LOGGING is defined."
#undef ASMJIT_NO_TEXT
#endif
#if defined(ASMJIT_NO_INTROSPECTION) && !defined(ASMJIT_NO_COMPILER)
#pragma message("ASMJIT_NO_INTROSPECTION can only be defined when ASMJIT_NO_COMPILER is defined")
#undef ASMJIT_NO_INTROSPECTION
#endif
// ============================================================================
// [asmjit::Build - Globals - Deprecated]
// ============================================================================
#ifndef ASMJIT_NO_DEPRECATED
#if defined(ASMJIT_BUILD_EMBED) || defined(ASMJIT_BUILD_STATIC)
#if defined(ASMJIT_BUILD_EMBED)
#pragma message("'ASMJIT_BUILD_EMBED' is deprecated, use 'ASMJIT_STATIC'")
#endif
#if defined(ASMJIT_BUILD_STATIC)
#pragma message("'ASMJIT_BUILD_STATIC' is deprecated, use 'ASMJIT_STATIC'")
#endif
#if !defined(ASMJIT_STATIC)
#define ASMJIT_STATIC
#endif
#endif
#endif // !ASMJIT_NO_DEPRECATED
// ============================================================================
// [asmjit::Build - Globals - Build Mode]
// ============================================================================
// Detect ASMJIT_BUILD_DEBUG and ASMJIT_BUILD_RELEASE if not defined.
#if !defined(ASMJIT_BUILD_DEBUG) && !defined(ASMJIT_BUILD_RELEASE)
#if !defined(NDEBUG)
#define ASMJIT_BUILD_DEBUG
#else
#define ASMJIT_BUILD_RELEASE
#endif
#endif
// ============================================================================
// [asmjit::Build - Globals - Target Architecture Information]
// ============================================================================
#if defined(_M_X64) || defined(__x86_64__)
#define ASMJIT_ARCH_X86 64
#elif defined(_M_IX86) || defined(__X86__) || defined(__i386__)
#define ASMJIT_ARCH_X86 32
#else
#define ASMJIT_ARCH_X86 0
#endif
#if defined(__arm64__) || defined(__aarch64__)
#define ASMJIT_ARCH_ARM 64
#elif defined(_M_ARM) || defined(_M_ARMT) || defined(__arm__) || defined(__thumb__) || defined(__thumb2__)
#define ASMJIT_ARCH_ARM 32
#else
#define ASMJIT_ARCH_ARM 0
#endif
#if defined(_MIPS_ARCH_MIPS64) || defined(__mips64)
#define ASMJIT_ARCH_MIPS 64
#elif defined(_MIPS_ARCH_MIPS32) || defined(_M_MRX000) || defined(__mips__)
#define ASMJIT_ARCH_MIPS 32
#else
#define ASMJIT_ARCH_MIPS 0
#endif
#define ASMJIT_ARCH_BITS (ASMJIT_ARCH_X86 | ASMJIT_ARCH_ARM | ASMJIT_ARCH_MIPS)
#if ASMJIT_ARCH_BITS == 0
#undef ASMJIT_ARCH_BITS
#if defined (__LP64__) || defined(_LP64)
#define ASMJIT_ARCH_BITS 64
#else
#define ASMJIT_ARCH_BITS 32
#endif
#endif
#if (defined(__ARMEB__)) || \
(defined(__MIPSEB__)) || \
(defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
#define ASMJIT_ARCH_LE 0
#define ASMJIT_ARCH_BE 1
#else
#define ASMJIT_ARCH_LE 1
#define ASMJIT_ARCH_BE 0
#endif
// ============================================================================
// [asmjit::Build - Globals - Build Architectures Definitions]
// ============================================================================
#if !defined(ASMJIT_NO_FOREIGN)
// If 'ASMJIT_NO_FOREIGN' is not defined then all architectures will be built.
#if !defined(ASMJIT_BUILD_X86)
#define ASMJIT_BUILD_X86
#endif
#else
// Detect architectures to build if building only for the host architecture.
#if ASMJIT_ARCH_X86 && !defined(ASMJIT_BUILD_X86)
#define ASMJIT_BUILD_X86
#endif
#endif
// Define 'ASMJIT_BUILD_HOST' if we know that host architecture will be built.
#if !defined(ASMJIT_BUILD_HOST) && ASMJIT_ARCH_X86 && defined(ASMJIT_BUILD_X86)
#define ASMJIT_BUILD_HOST
#endif
// ============================================================================
// [asmjit::Build - Globals - C++ Compiler and Features Detection]
// ============================================================================
#define ASMJIT_CXX_GNU 0
#define ASMJIT_CXX_MAKE_VER(MAJOR, MINOR) ((MAJOR) * 1000 + (MINOR))
// Intel Compiler [pretends to be GNU or MSC, so it must be checked first]:
// - https://software.intel.com/en-us/articles/c0x-features-supported-by-intel-c-compiler
// - https://software.intel.com/en-us/articles/c14-features-supported-by-intel-c-compiler
// - https://software.intel.com/en-us/articles/c17-features-supported-by-intel-c-compiler
#if defined(__INTEL_COMPILER)
// MSC Compiler:
// - https://msdn.microsoft.com/en-us/library/hh567368.aspx
//
// Version List:
// - 16.00.0 == VS2010
// - 17.00.0 == VS2012
// - 18.00.0 == VS2013
// - 19.00.0 == VS2015
// - 19.10.0 == VS2017
#elif defined(_MSC_VER) && defined(_MSC_FULL_VER)
// Clang Compiler [Pretends to be GNU, so it must be checked before]:
// - https://clang.llvm.org/cxx_status.html
#elif defined(__clang_major__) && defined(__clang_minor__) && defined(__clang_patchlevel__)
// GNU Compiler:
// - https://gcc.gnu.org/projects/cxx-status.html
#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#undef ASMJIT_CXX_GNU
#define ASMJIT_CXX_GNU ASMJIT_CXX_MAKE_VER(__GNUC__, __GNUC_MINOR__)
#endif
// Compiler features detection macros.
#if defined(__clang__) && defined(__has_attribute)
#define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (__has_attribute(NAME))
#else
#define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (!(!(CHECK)))
#endif
// ============================================================================
// [asmjit::Build - Globals - API Decorators & Language Extensions]
// ============================================================================
// API (Export / Import).
#if !defined(ASMJIT_STATIC)
#if defined(_WIN32) && (defined(_MSC_VER) || defined(__MINGW32__))
#ifdef ASMJIT_EXPORTS
#define ASMJIT_API __declspec(dllexport)
#else
#define ASMJIT_API __declspec(dllimport)
#endif
#elif defined(_WIN32) && defined(__GNUC__)
#ifdef ASMJIT_EXPORTS
#define ASMJIT_API __attribute__((__dllexport__))
#else
#define ASMJIT_API __attribute__((__dllimport__))
#endif
#elif defined(__GNUC__)
#define ASMJIT_API __attribute__((__visibility__("default")))
#endif
#endif
#if !defined(ASMJIT_API)
#define ASMJIT_API
#endif
#if !defined(ASMJIT_VARAPI)
#define ASMJIT_VARAPI extern ASMJIT_API
#endif
// This is basically a workaround. When using MSVC and marking class as DLL
// export everything gets exported, which is unwanted in most projects. MSVC
// automatically exports typeinfo and vtable if at least one symbol of the
// class is exported. However, GCC has some strange behavior that even if
// one or more symbol is exported it doesn't export typeinfo unless the
// class itself is decorated with "visibility(default)" (i.e. ASMJIT_API).
#if !defined(_WIN32) && defined(__GNUC__)
#define ASMJIT_VIRTAPI ASMJIT_API
#else
#define ASMJIT_VIRTAPI
#endif
// Function attributes.
#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__)
#define ASMJIT_INLINE inline __attribute__((__always_inline__))
#elif !defined(ASMJIT_BUILD_DEBUG) && defined(_MSC_VER)
#define ASMJIT_INLINE __forceinline
#else
#define ASMJIT_INLINE inline
#endif
#if defined(__GNUC__)
#define ASMJIT_NOINLINE __attribute__((__noinline__))
#define ASMJIT_NORETURN __attribute__((__noreturn__))
#elif defined(_MSC_VER)
#define ASMJIT_NOINLINE __declspec(noinline)
#define ASMJIT_NORETURN __declspec(noreturn)
#else
#define ASMJIT_NOINLINE
#define ASMJIT_NORETURN
#endif
// Calling conventions.
#if ASMJIT_ARCH_X86 == 32 && defined(__GNUC__)
#define ASMJIT_CDECL __attribute__((__cdecl__))
#define ASMJIT_STDCALL __attribute__((__stdcall__))
#define ASMJIT_FASTCALL __attribute__((__fastcall__))
#define ASMJIT_REGPARM(N) __attribute__((__regparm__(N)))
#elif ASMJIT_ARCH_X86 == 32 && defined(_MSC_VER)
#define ASMJIT_CDECL __cdecl
#define ASMJIT_STDCALL __stdcall
#define ASMJIT_FASTCALL __fastcall
#define ASMJIT_REGPARM(N)
#else
#define ASMJIT_CDECL
#define ASMJIT_STDCALL
#define ASMJIT_FASTCALL
#define ASMJIT_REGPARM(N)
#endif
#if ASMJIT_ARCH_X86 && defined(_WIN32) && defined(_MSC_VER)
#define ASMJIT_VECTORCALL __vectorcall
#elif ASMJIT_ARCH_X86 && defined(_WIN32)
#define ASMJIT_VECTORCALL __attribute__((__vectorcall__))
#else
#define ASMJIT_VECTORCALL
#endif
// Type alignment (not allowed by C++11 'alignas' keyword).
#if defined(__GNUC__)
#define ASMJIT_ALIGN_TYPE(TYPE, N) __attribute__((__aligned__(N))) TYPE
#elif defined(_MSC_VER)
#define ASMJIT_ALIGN_TYPE(TYPE, N) __declspec(align(N)) TYPE
#else
#define ASMJIT_ALIGN_TYPE(TYPE, N) TYPE
#endif
//! \def ASMJIT_MAY_ALIAS
//!
//! Expands to `__attribute__((__may_alias__))` if supported.
#if defined(__GNUC__)
#define ASMJIT_MAY_ALIAS __attribute__((__may_alias__))
#else
#define ASMJIT_MAY_ALIAS
#endif
//! \def ASMJIT_LIKELY(...)
//!
//! Condition is likely to be taken (mostly error handling and edge cases).
//! \def ASMJIT_UNLIKELY(...)
//!
//! Condition is unlikely to be taken (mostly error handling and edge cases).
#if defined(__GNUC__)
#define ASMJIT_LIKELY(...) __builtin_expect(!!(__VA_ARGS__), 1)
#define ASMJIT_UNLIKELY(...) __builtin_expect(!!(__VA_ARGS__), 0)
#else
#define ASMJIT_LIKELY(...) (__VA_ARGS__)
#define ASMJIT_UNLIKELY(...) (__VA_ARGS__)
#endif
//! \def ASMJIT_FALLTHROUGH
//!
//! Portable [[fallthrough]] attribute.
#if defined(__clang__) && __cplusplus >= 201103L
#define ASMJIT_FALLTHROUGH [[clang::fallthrough]]
#elif defined(__GNUC__) && __GNUC__ >= 7
#define ASMJIT_FALLTHROUGH __attribute__((__fallthrough__))
#else
#define ASMJIT_FALLTHROUGH ((void)0) /* fallthrough */
#endif
//! \def ASMJIT_DEPRECATED
//!
//! Marks function, class, struct, enum, or anything else as deprecated.
#if defined(__GNUC__)
#define ASMJIT_DEPRECATED(MESSAGE) __attribute__((__deprecated__(MESSAGE)))
#if defined(__clang__)
#define ASMJIT_DEPRECATED_STRUCT(MESSAGE) __attribute__((__deprecated__(MESSAGE)))
#else
#define ASMJIT_DEPRECATED_STRUCT(MESSAGE) /* not usable if a deprecated function uses it */
#endif
#elif defined(_MSC_VER)
#define ASMJIT_DEPRECATED(MESSAGE) __declspec(deprecated(MESSAGE))
#define ASMJIT_DEPRECATED_STRUCT(MESSAGE) /* not usable if a deprecated function uses it */
#else
#define ASMJIT_DEPRECATED(MESSAGE)
#define ASMJIT_DEPRECATED_STRUCT(MESSAGE)
#endif
// Utilities.
#define ASMJIT_OFFSET_OF(STRUCT, MEMBER) ((int)(intptr_t)((const char*)&((const STRUCT*)0x100)->MEMBER) - 0x100)
#define ASMJIT_ARRAY_SIZE(X) uint32_t(sizeof(X) / sizeof(X[0]))
#if ASMJIT_CXX_HAS_ATTRIBUTE(no_sanitize, 0)
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize__("undefined")))
#elif ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(4, 9)
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize_undefined__))
#else
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF
#endif
// ============================================================================
// [asmjit::Build - Globals - Begin-Namespace / End-Namespace]
// ============================================================================
#if defined(__clang__)
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wconstant-logical-operand\"") \
_Pragma("clang diagnostic ignored \"-Wunnamed-type-template-args\"")
#define ASMJIT_END_NAMESPACE \
_Pragma("clang diagnostic pop") \
}
#elif defined(__GNUC__) && __GNUC__ == 4
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"")
#define ASMJIT_END_NAMESPACE \
_Pragma("GCC diagnostic pop") \
}
#elif defined(__GNUC__) && __GNUC__ >= 8
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wclass-memaccess\"")
#define ASMJIT_END_NAMESPACE \
_Pragma("GCC diagnostic pop") \
}
#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { \
__pragma(warning(push)) \
__pragma(warning(disable: 4127)) /* conditional expression is const */ \
__pragma(warning(disable: 4201)) /* nameless struct/union */
#define ASMJIT_END_NAMESPACE \
__pragma(warning(pop)) \
}
#endif
#if !defined(ASMJIT_BEGIN_NAMESPACE) && !defined(ASMJIT_END_NAMESPACE)
#define ASMJIT_BEGIN_NAMESPACE namespace asmjit {
#define ASMJIT_END_NAMESPACE }
#endif
#define ASMJIT_BEGIN_SUB_NAMESPACE(NAMESPACE) \
ASMJIT_BEGIN_NAMESPACE \
namespace NAMESPACE {
#define ASMJIT_END_SUB_NAMESPACE \
} \
ASMJIT_END_NAMESPACE
// ============================================================================
// [asmjit::Build - Globals - Utilities]
// ============================================================================
#define ASMJIT_NONCOPYABLE(...) \
private: \
__VA_ARGS__(const __VA_ARGS__& other) = delete; \
__VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
public:
#define ASMJIT_NONCONSTRUCTIBLE(...) \
private: \
__VA_ARGS__() = delete; \
__VA_ARGS__(const __VA_ARGS__& other) = delete; \
__VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
public:
// ============================================================================
// [asmjit::Build - Globals - Cleanup]
// ============================================================================
// Cleanup definitions that are only used within this header file.
#undef ASMJIT_CXX_GNU
#undef ASMJIT_CXX_MAKE_VER
#endif // ASMJIT_CORE_API_CONFIG_H_INCLUDED
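The macros documented above are meant to be defined by the embedding project before any asmjit header is included. A hedged sketch of a consumer translation unit; the feature selection shown is illustrative, not what this commit configures:
// Illustrative configuration sketch; macro names come from the documentation above.
#define ASMJIT_STATIC         // Link asmjit as a static library (no dllimport/dllexport).
#define ASMJIT_NO_DEPRECATED  // Reject uses of deprecated API at compile time.
#include <asmjit/asmjit.h>

static_assert(ASMJIT_LIBRARY_VERSION >= 0x010400,
              "expects asmjit >= 1.4.0 in the (Major << 16) | (Minor << 8) | Patch scheme");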

View File

@ -0,0 +1,164 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED
#define ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED
// This file provides architecture-specific classes that are required in the
// core library. For example, the Imm operand can be constructed from arm::Shift
// in a constexpr way, so arm::Shift must be available here. This header therefore
// provides everything architecture-specific that the Core API uses.
#include "../core/globals.h"
// ============================================================================
// [asmjit::arm]
// ============================================================================
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
//! \addtogroup asmjit_arm
//! \{
//! Represents ARM immediate shift operation type and value.
class Shift {
public:
//! Operation predicate (ARM) describes either SHIFT or EXTEND operation.
//!
//! \note The constants are AsmJit specific. The first 5 values describe real
//! constants on ARM32 and AArch64 hardware, however, the addition constants
//! that describe extend modes are specific to AsmJit and would be translated
//! to the AArch64 specific constants by the assembler.
enum Op : uint32_t {
//! Shift left logical operation (default).
//!
//! Available to all ARM architectures.
kOpLSL = 0x00u,
//! Shift right logical operation.
//!
//! Available to all ARM architectures.
kOpLSR = 0x01u,
//! Shift right arithmetic operation.
//!
//! Available to all ARM architectures.
kOpASR = 0x02u,
//! Rotate right operation.
//!
//! \note Not available in AArch64 mode.
kOpROR = 0x03u,
//! Rotate right with carry operation (encoded as `kOpROR` with a zero value).
//!
//! \note Not available in AArch64 mode.
kOpRRX = 0x04u,
//! Shift left by filling low order bits with ones.
kOpMSL = 0x05u,
//! UXTN extend register operation (AArch64 only).
kOpUXTB = 0x06u,
//! UXTH extend register operation (AArch64 only).
kOpUXTH = 0x07u,
//! UXTW extend register operation (AArch64 only).
kOpUXTW = 0x08u,
//! UXTX extend register operation (AArch64 only).
kOpUXTX = 0x09u,
//! SXTB extend register operation (AArch64 only).
kOpSXTB = 0x0Au,
//! SXTH extend register operation (AArch64 only).
kOpSXTH = 0x0Bu,
//! SXTW extend register operation (AArch64 only).
kOpSXTW = 0x0Cu,
//! SXTX extend register operation (AArch64 only).
kOpSXTX = 0x0Du
// NOTE: 0xE and 0xF are used by memory operand to specify POST|PRE offset mode.
};
//! Shift operation.
uint32_t _op;
//! Shift Value.
uint32_t _value;
//! Default constructed Shift is not initialized.
inline Shift() noexcept = default;
//! Copy constructor (default)
constexpr Shift(const Shift& other) noexcept = default;
//! Constructs Shift from operation `op` and shift `value`.
constexpr Shift(uint32_t op, uint32_t value) noexcept
: _op(op),
_value(value) {}
//! Returns the shift operation.
constexpr uint32_t op() const noexcept { return _op; }
//! Returns the shift amount.
constexpr uint32_t value() const noexcept { return _value; }
//! Sets shift operation to `op`.
inline void setOp(uint32_t op) noexcept { _op = op; }
//! Sets shift amount to `value`.
inline void setValue(uint32_t value) noexcept { _value = value; }
};
//! Constructs a `LSL #value` shift (logical shift left).
static constexpr Shift lsl(uint32_t value) noexcept { return Shift(Shift::kOpLSL, value); }
//! Constructs a `LSR #value` shift (logical shift right).
static constexpr Shift lsr(uint32_t value) noexcept { return Shift(Shift::kOpLSR, value); }
//! Constructs a `ASR #value` shift (arithmetic shift right).
static constexpr Shift asr(uint32_t value) noexcept { return Shift(Shift::kOpASR, value); }
//! Constructs a `ROR #value` shift (rotate right).
static constexpr Shift ror(uint32_t value) noexcept { return Shift(Shift::kOpROR, value); }
//! Constructs a `RRX` shift (rotate with carry by 1).
static constexpr Shift rrx() noexcept { return Shift(Shift::kOpRRX, 0); }
//! Constructs a `MSL #value` shift (logical shift left filling ones).
static constexpr Shift msl(uint32_t value) noexcept { return Shift(Shift::kOpMSL, value); }
//! Constructs a `UXTB #value` extend and shift (unsigned byte extend).
static constexpr Shift uxtb(uint32_t value) noexcept { return Shift(Shift::kOpUXTB, value); }
//! Constructs a `UXTH #value` extend and shift (unsigned hword extend).
static constexpr Shift uxth(uint32_t value) noexcept { return Shift(Shift::kOpUXTH, value); }
//! Constructs a `UXTW #value` extend and shift (unsigned word extend).
static constexpr Shift uxtw(uint32_t value) noexcept { return Shift(Shift::kOpUXTW, value); }
//! Constructs a `UXTX #value` extend and shift (unsigned dword extend).
static constexpr Shift uxtx(uint32_t value) noexcept { return Shift(Shift::kOpUXTX, value); }
//! Constructs a `SXTB #value` extend and shift (signed byte extend).
static constexpr Shift sxtb(uint32_t value) noexcept { return Shift(Shift::kOpSXTB, value); }
//! Constructs a `SXTH #value` extend and shift (signed hword extend).
static constexpr Shift sxth(uint32_t value) noexcept { return Shift(Shift::kOpSXTH, value); }
//! Constructs a `SXTW #value` extend and shift (signed word extend).
static constexpr Shift sxtw(uint32_t value) noexcept { return Shift(Shift::kOpSXTW, value); }
//! Constructs a `SXTX #value` extend and shift (signed dword extend).
static constexpr Shift sxtx(uint32_t value) noexcept { return Shift(Shift::kOpSXTX, value); }
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED
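The helpers above are thin constexpr constructors over the (op, value) pair stored in arm::Shift. A short sketch using only the declarations in this header; the include path assumes archcommons.h is reached through asmjit's core umbrella header:
// Sketch only: exercises the arm::Shift helpers declared above.
#include <asmjit/core.h>   // archcommons.h is assumed to be pulled in via the core headers.
#include <cassert>

void shiftExample() {
  constexpr asmjit::arm::Shift byFour = asmjit::arm::lsl(4);   // LSL #4
  static_assert(byFour.op() == asmjit::arm::Shift::kOpLSL, "constexpr-constructible");
  static_assert(byFour.value() == 4, "carries the immediate shift amount");

  asmjit::arm::Shift s = asmjit::arm::ror(8);                  // ROR #8 (not valid in AArch64 mode).
  s.setValue(16);                                              // Mutable after construction.
  assert(s.value() == 16);
}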

View File

@ -0,0 +1,155 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
#include "../core/misc_p.h"
#ifdef ASMJIT_BUILD_X86
#include "../x86/x86archtraits_p.h"
#endif
#ifdef ASMJIT_BUILD_ARM
#include "../arm/armarchtraits_p.h"
#endif
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ArchTraits]
// ============================================================================
static const constexpr ArchTraits noArchTraits = {
0xFF, // SP.
0xFF, // FP.
0xFF, // LR.
0xFF, // PC.
{ 0, 0, 0 }, // Reserved.
0, // HW stack alignment.
0, // Min stack offset.
0, // Max stack offset.
{ 0, 0, 0, 0}, // ISA features [Gp, Vec, Other0, Other1].
{ { 0 } }, // RegTypeToSignature.
{ 0 }, // RegTypeToTypeId.
{ 0 } // TypeIdToRegType.
};
ASMJIT_VARAPI const ArchTraits _archTraits[Environment::kArchCount] = {
// No architecture.
noArchTraits,
// X86/X86 architectures.
#ifdef ASMJIT_BUILD_X86
x86::x86ArchTraits,
x86::x64ArchTraits,
#else
noArchTraits,
noArchTraits,
#endif
// RISCV32/RISCV64 architectures.
noArchTraits,
noArchTraits,
// ARM architecture
noArchTraits,
// AArch64 architecture.
#ifdef ASMJIT_BUILD_ARM
arm::a64ArchTraits,
#else
noArchTraits,
#endif
// ARM/Thumb architecture.
noArchTraits,
// Reserved.
noArchTraits,
// MIPS32/MIPS64
noArchTraits,
noArchTraits
};
// ============================================================================
// [asmjit::ArchUtils]
// ============================================================================
ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegInfo(uint32_t arch, uint32_t typeId, uint32_t* typeIdOut, RegInfo* regInfoOut) noexcept {
const ArchTraits& archTraits = ArchTraits::byArch(arch);
// Passed RegType instead of TypeId?
if (typeId <= BaseReg::kTypeMax)
typeId = archTraits.regTypeToTypeId(typeId);
if (ASMJIT_UNLIKELY(!Type::isValid(typeId)))
return DebugUtils::errored(kErrorInvalidTypeId);
// First normalize architecture dependent types.
if (Type::isAbstract(typeId)) {
bool is32Bit = Environment::is32Bit(arch);
if (typeId == Type::kIdIntPtr)
typeId = is32Bit ? Type::kIdI32 : Type::kIdI64;
else
typeId = is32Bit ? Type::kIdU32 : Type::kIdU64;
}
// Type size helps to construct all groups of registers.
// TypeId is invalid if the size is zero.
uint32_t size = Type::sizeOf(typeId);
if (ASMJIT_UNLIKELY(!size))
return DebugUtils::errored(kErrorInvalidTypeId);
if (ASMJIT_UNLIKELY(typeId == Type::kIdF80))
return DebugUtils::errored(kErrorInvalidUseOfF80);
uint32_t regType = 0;
if (typeId >= Type::_kIdBaseStart && typeId < Type::_kIdVec32Start) {
regType = archTraits._typeIdToRegType[typeId - Type::_kIdBaseStart];
if (!regType) {
if (typeId == Type::kIdI64 || typeId == Type::kIdU64)
return DebugUtils::errored(kErrorInvalidUseOfGpq);
else
return DebugUtils::errored(kErrorInvalidTypeId);
}
}
else {
if (size <= 8 && archTraits._regInfo[BaseReg::kTypeVec64].isValid())
regType = BaseReg::kTypeVec64;
else if (size <= 16 && archTraits._regInfo[BaseReg::kTypeVec128].isValid())
regType = BaseReg::kTypeVec128;
else if (size == 32 && archTraits._regInfo[BaseReg::kTypeVec256].isValid())
regType = BaseReg::kTypeVec256;
else if (archTraits._regInfo[BaseReg::kTypeVec512].isValid())
regType = BaseReg::kTypeVec512;
else
return DebugUtils::errored(kErrorInvalidTypeId);
}
*typeIdOut = typeId;
regInfoOut->reset(archTraits.regTypeToSignature(regType));
return kErrorOk;
}
ASMJIT_END_NAMESPACE

174
deps/asmjit/src/asmjit/core/archtraits.h vendored Normal file
View File

@ -0,0 +1,174 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ARCHTRAITS_H_INCLUDED
#define ASMJIT_CORE_ARCHTRAITS_H_INCLUDED
#include "../core/environment.h"
#include "../core/operand.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::ArchTraits]
// ============================================================================
//! Architecture traits used by Function API and Compiler's register allocator.
struct ArchTraits {
//! ISA features for each register group.
enum IsaFeatures : uint32_t {
//! ISA features a register swap by using a single instruction.
kIsaFeatureSwap = 0x01u,
//! ISA features a push/pop like instruction for this register group.
kIsaFeaturePushPop = 0x02u,
};
//! Stack pointer register id.
uint8_t _spRegId;
//! Frame pointer register id.
uint8_t _fpRegId;
//! Link register id.
uint8_t _linkRegId;
//! Instruction pointer (or program counter) register id, if accessible.
uint8_t _ipRegId;
// Reserved.
uint8_t _reserved[3];
//! Hardware stack alignment requirement.
uint8_t _hwStackAlignment;
//! Minimum addressable offset on stack guaranteed for all instructions.
uint32_t _minStackOffset;
//! Maximum addressable offset on stack depending on specific instruction.
uint32_t _maxStackOffset;
//! Flags for each virtual register group (always covers GP and Vec groups).
uint8_t _isaFlags[BaseReg::kGroupVirt];
//! Maps a register type into a signature that provides group and size, and can
//! be used to construct register operands.
RegInfo _regInfo[BaseReg::kTypeMax + 1];
//! Maps a register to type-id, see \ref Type::Id.
uint8_t _regTypeToTypeId[BaseReg::kTypeMax + 1];
//! Maps base TypeId values (from TypeId::_kIdBaseStart) to register types, see \ref Type::Id.
uint8_t _typeIdToRegType[32];
//! Resets all members to zeros.
inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
//! \name Accessors
//! \{
//! Returns stack pointer register id.
inline constexpr uint32_t spRegId() const noexcept { return _spRegId; }
//! Returns stack frame register id.
inline constexpr uint32_t fpRegId() const noexcept { return _fpRegId; }
//! Returns link register id, if the architecture provides it.
inline constexpr uint32_t linkRegId() const noexcept { return _linkRegId; }
//! Returns instruction pointer register id, if the architecture provides it.
inline constexpr uint32_t ipRegId() const noexcept { return _ipRegId; }
//! Returns a hardware stack alignment requirement.
//!
//! \note This is a hardware constraint. Architectures that don't constrain
//! it would return the lowest alignment (1), however, some architectures may
//! constrain the alignment, for example AArch64 requires 16-byte alignment.
inline constexpr uint32_t hwStackAlignment() const noexcept { return _hwStackAlignment; }
//! Tests whether the architecture provides link register, which is used across
//! function calls. If the link register is not provided then a function call
//! pushes the return address on stack (X86/X64).
inline constexpr bool hasLinkReg() const noexcept { return _linkRegId != BaseReg::kIdBad; }
//! Returns minimum addressable offset on stack guaranteed for all instructions.
inline constexpr uint32_t minStackOffset() const noexcept { return _minStackOffset; }
//! Returns maximum addressable offset on stack depending on specific instruction.
inline constexpr uint32_t maxStackOffset() const noexcept { return _maxStackOffset; }
//! Returns ISA flags of the given register `group`.
inline constexpr uint32_t isaFlags(uint32_t group) const noexcept { return _isaFlags[group]; }
//! Tests whether the given register `group` has the given `flag` set.
inline constexpr bool hasIsaFlag(uint32_t group, uint32_t flag) const noexcept { return (_isaFlags[group] & flag) != 0; }
//! Tests whether the ISA provides register swap instruction for the given register `group`.
inline constexpr bool hasSwap(uint32_t group) const noexcept { return hasIsaFlag(group, kIsaFeatureSwap); }
//! Tests whether the ISA provides push/pop instructions for the given register `group`.
inline constexpr bool hasPushPop(uint32_t group) const noexcept { return hasIsaFlag(group, kIsaFeaturePushPop); }
inline uint32_t hasRegType(uint32_t rType) const noexcept {
return rType <= BaseReg::kTypeMax && _regInfo[rType].signature() != 0;
}
inline uint32_t regTypeToSignature(uint32_t rType) const noexcept {
ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
return _regInfo[rType].signature();
}
inline uint32_t regTypeToGroup(uint32_t rType) const noexcept {
ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
return _regInfo[rType].group();
}
inline uint32_t regTypeToSize(uint32_t rType) const noexcept {
ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
return _regInfo[rType].size();
}
inline uint32_t regTypeToTypeId(uint32_t rType) const noexcept {
ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
return _regTypeToTypeId[rType];
}
//! \}
//! \name Statics
//! \{
//! Returns a const reference to `ArchTraits` for the given architecture `arch`.
static inline const ArchTraits& byArch(uint32_t arch) noexcept;
//! \}
};
ASMJIT_VARAPI const ArchTraits _archTraits[Environment::kArchCount];
inline const ArchTraits& ArchTraits::byArch(uint32_t arch) noexcept { return _archTraits[arch & ~Environment::kArchBigEndianMask]; }
// ============================================================================
// [asmjit::ArchUtils]
// ============================================================================
//! Architecture utilities.
namespace ArchUtils {
ASMJIT_API Error typeIdToRegInfo(uint32_t arch, uint32_t typeId, uint32_t* typeIdOut, RegInfo* regInfo) noexcept;
} // {ArchUtils}
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ARCHTRAITS_H_INCLUDED
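ArchUtils::typeIdToRegInfo() is the bridge between abstract type ids and concrete register signatures described above. A hedged sketch of a call; Environment::kArchX64 is assumed to be the identifier of the 64-bit x86 target in this asmjit revision:
// Hedged sketch of the typeIdToRegInfo() contract; Environment::kArchX64 is an assumption.
#include <asmjit/core.h>

asmjit::Error mapIntPtr() {
  uint32_t typeId = asmjit::Type::kIdIntPtr;   // Abstract type, normalized per architecture.
  uint32_t normalized;
  asmjit::RegInfo regInfo;

  asmjit::Error err = asmjit::ArchUtils::typeIdToRegInfo(
      asmjit::Environment::kArchX64, typeId, &normalized, &regInfo);
  if (err != asmjit::kErrorOk)
    return err;

  // On a 64-bit target, kIdIntPtr normalizes to kIdI64 and regInfo describes a Gp register.
  return asmjit::kErrorOk;
}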

View File

@ -0,0 +1,406 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/assembler.h"
#include "../core/codewriter_p.h"
#include "../core/constpool.h"
#include "../core/emitterutils_p.h"
#include "../core/formatter.h"
#include "../core/logger.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::BaseAssembler - Construction / Destruction]
// ============================================================================
BaseAssembler::BaseAssembler() noexcept
: BaseEmitter(kTypeAssembler) {}
BaseAssembler::~BaseAssembler() noexcept {}
// ============================================================================
// [asmjit::BaseAssembler - Buffer Management]
// ============================================================================
Error BaseAssembler::setOffset(size_t offset) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
size_t size = Support::max<size_t>(_section->bufferSize(), this->offset());
if (ASMJIT_UNLIKELY(offset > size))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
_bufferPtr = _bufferData + offset;
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseAssembler - Section Management]
// ============================================================================
static void BaseAssembler_initSection(BaseAssembler* self, Section* section) noexcept {
uint8_t* p = section->_buffer._data;
self->_section = section;
self->_bufferData = p;
self->_bufferPtr = p + section->_buffer._size;
self->_bufferEnd = p + section->_buffer._capacity;
}
Error BaseAssembler::section(Section* section) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
if (!_code->isSectionValid(section->id()) || _code->_sections[section->id()] != section)
return reportError(DebugUtils::errored(kErrorInvalidSection));
#ifndef ASMJIT_NO_LOGGING
if (_logger)
_logger->logf(".section %s {#%u}\n", section->name(), section->id());
#endif
BaseAssembler_initSection(this, section);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseAssembler - Label Management]
// ============================================================================
Label BaseAssembler::newLabel() {
uint32_t labelId = Globals::kInvalidId;
if (ASMJIT_LIKELY(_code)) {
LabelEntry* le;
Error err = _code->newLabelEntry(&le);
if (ASMJIT_UNLIKELY(err))
reportError(err);
else
labelId = le->id();
}
return Label(labelId);
}
Label BaseAssembler::newNamedLabel(const char* name, size_t nameSize, uint32_t type, uint32_t parentId) {
uint32_t labelId = Globals::kInvalidId;
if (ASMJIT_LIKELY(_code)) {
LabelEntry* le;
Error err = _code->newNamedLabelEntry(&le, name, nameSize, type, parentId);
if (ASMJIT_UNLIKELY(err))
reportError(err);
else
labelId = le->id();
}
return Label(labelId);
}
Error BaseAssembler::bind(const Label& label) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
Error err = _code->bindLabel(label, _section->id(), offset());
#ifndef ASMJIT_NO_LOGGING
if (_logger)
EmitterUtils::logLabelBound(this, label);
#endif
resetInlineComment();
if (err)
return reportError(err);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseAssembler - Embed]
// ============================================================================
#ifndef ASMJIT_NO_LOGGING
struct DataSizeByPower {
char str[4];
};
static const DataSizeByPower dataSizeByPowerTable[] = {
{ "db" },
{ "dw" },
{ "dd" },
{ "dq" }
};
#endif
Error BaseAssembler::embed(const void* data, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
if (dataSize == 0)
return kErrorOk;
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
writer.emitData(data, dataSize);
#ifndef ASMJIT_NO_LOGGING
if (_logger)
_logger->logBinary(data, dataSize);
#endif
writer.done(this);
return kErrorOk;
}
Error BaseAssembler::embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount) {
uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize());
uint32_t finalTypeId = Type::deabstract(typeId, deabstractDelta);
if (ASMJIT_UNLIKELY(!Type::isValid(finalTypeId)))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (itemCount == 0 || repeatCount == 0)
return kErrorOk;
uint32_t typeSize = Type::sizeOf(finalTypeId);
Support::FastUInt8 of = 0;
size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
size_t totalSize = Support::mulOverflow(dataSize, repeatCount, &of);
if (ASMJIT_UNLIKELY(of))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, totalSize));
#ifndef ASMJIT_NO_LOGGING
const uint8_t* start = writer.cursor();
#endif
for (size_t i = 0; i < repeatCount; i++) {
writer.emitData(data, dataSize);
}
#ifndef ASMJIT_NO_LOGGING
if (_logger)
_logger->logBinary(start, totalSize);
#endif
writer.done(this);
return kErrorOk;
}
Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
if (ASMJIT_UNLIKELY(!isLabelValid(label)))
return reportError(DebugUtils::errored(kErrorInvalidLabel));
ASMJIT_PROPAGATE(align(kAlignData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(bind(label));
size_t size = pool.size();
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, size));
pool.fill(writer.cursor());
#ifndef ASMJIT_NO_LOGGING
if (_logger)
_logger->logBinary(writer.cursor(), size);
#endif
writer.advance(size);
writer.done(this);
return kErrorOk;
}
Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
ASMJIT_ASSERT(_code != nullptr);
RelocEntry* re;
LabelEntry* le = _code->labelEntry(label);
if (ASMJIT_UNLIKELY(!le))
return reportError(DebugUtils::errored(kErrorInvalidLabel));
if (dataSize == 0)
dataSize = registerSize();
if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8))
return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<256> sb;
sb.appendFormat("%s ", dataSizeByPowerTable[Support::ctz(dataSize)].str);
Formatter::formatLabel(sb, 0, this, label.id());
sb.append('\n');
_logger->log(sb);
}
#endif
Error err = _code->newRelocEntry(&re, RelocEntry::kTypeRelToAbs);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
re->_sourceSectionId = _section->id();
re->_sourceOffset = offset();
re->_format.resetToDataValue(uint32_t(dataSize));
if (le->isBound()) {
re->_targetSectionId = le->section()->id();
re->_payload = le->offset();
}
else {
OffsetFormat of;
of.resetToDataValue(uint32_t(dataSize));
LabelLink* link = _code->newLabelLink(le, _section->id(), offset(), 0, of);
if (ASMJIT_UNLIKELY(!link))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
link->relocId = re->id();
}
// Emit dummy DWORD/QWORD depending on the data size.
writer.emitZeros(dataSize);
writer.done(this);
return kErrorOk;
}
Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
LabelEntry* labelEntry = _code->labelEntry(label);
LabelEntry* baseEntry = _code->labelEntry(base);
if (ASMJIT_UNLIKELY(!labelEntry || !baseEntry))
return reportError(DebugUtils::errored(kErrorInvalidLabel));
if (dataSize == 0)
dataSize = registerSize();
if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8))
return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<256> sb;
sb.appendFormat(".%s (", dataSizeByPowerTable[Support::ctz(dataSize)].str);
Formatter::formatLabel(sb, 0, this, label.id());
sb.append(" - ");
Formatter::formatLabel(sb, 0, this, base.id());
sb.append(")\n");
_logger->log(sb);
}
#endif
// If both labels are bound within the same section it means the delta can be calculated now.
if (labelEntry->isBound() && baseEntry->isBound() && labelEntry->section() == baseEntry->section()) {
uint64_t delta = labelEntry->offset() - baseEntry->offset();
writer.emitValueLE(delta, dataSize);
}
else {
RelocEntry* re;
Error err = _code->newRelocEntry(&re, RelocEntry::kTypeExpression);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
Expression* exp = _code->_zone.newT<Expression>();
if (ASMJIT_UNLIKELY(!exp))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
exp->reset();
exp->opType = Expression::kOpSub;
exp->setValueAsLabel(0, labelEntry);
exp->setValueAsLabel(1, baseEntry);
re->_format.resetToDataValue(dataSize);
re->_sourceSectionId = _section->id();
re->_sourceOffset = offset();
re->_payload = (uint64_t)(uintptr_t)exp;
writer.emitZeros(dataSize);
}
writer.done(this);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseAssembler - Comment]
// ============================================================================
Error BaseAssembler::comment(const char* data, size_t size) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
_logger->log(data, size);
_logger->log("\n", 1);
return kErrorOk;
}
#else
DebugUtils::unused(data, size);
#endif
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseAssembler - Events]
// ============================================================================
Error BaseAssembler::onAttach(CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
// Attach to the end of the .text section.
BaseAssembler_initSection(this, code->_sections[0]);
return kErrorOk;
}
Error BaseAssembler::onDetach(CodeHolder* code) noexcept {
_section = nullptr;
_bufferData = nullptr;
_bufferEnd = nullptr;
_bufferPtr = nullptr;
return Base::onDetach(code);
}
ASMJIT_END_NAMESPACE
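The label and embed primitives implemented above write straight into the current section's CodeBuffer. An illustrative sketch using the x86 backend referenced in assembler.h; error handling is trimmed, and the lea/ptr/align calls are the usual x86 emitter API, assumed rather than shown in this file:
// Illustrative sketch of the label/embed primitives implemented above.
#include <asmjit/asmjit.h>

void emitWithData(asmjit::x86::Assembler& a) {
  asmjit::Label data = a.newLabel();                 // Creates a LabelEntry in the CodeHolder.

  a.lea(asmjit::x86::rax, asmjit::x86::ptr(data));   // Reference the label before it is bound.
  a.ret();

  a.align(asmjit::kAlignData, 8);                    // Align the embedded payload.
  a.bind(data);                                      // Bind the label at the current offset.
  static const uint64_t payload = 0x1122334455667788u;
  a.embed(&payload, sizeof(payload));                // Copies raw bytes into the section buffer.
  a.embedLabel(data, sizeof(void*));                 // Emits a relocated pointer-sized address of 'data'.
}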

152
deps/asmjit/src/asmjit/core/assembler.h vendored Normal file
View File

@ -0,0 +1,152 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ASSEMBLER_H_INCLUDED
#define ASMJIT_CORE_ASSEMBLER_H_INCLUDED
#include "../core/codeholder.h"
#include "../core/datatypes.h"
#include "../core/emitter.h"
#include "../core/operand.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_assembler
//! \{
// ============================================================================
// [asmjit::BaseAssembler]
// ============================================================================
//! Base assembler.
//!
//! This is a base class that provides interface used by architecture specific
//! assembler implementations. Assembler doesn't hold any data, instead it's
//! attached to \ref CodeHolder, which provides all the data that Assembler
//! needs and which can be altered by it.
//!
//! Check out architecture specific assemblers for more details and examples:
//!
//! - \ref x86::Assembler - X86/X64 assembler implementation.
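//!
//! A minimal usage sketch (assuming the x86 backend; `Environment`, `CodeHolder`
//! and `x86::Assembler` come from other asmjit headers, and a runtime or a
//! relocation step is still required to actually execute the code):
//!
//! ```
//! CodeHolder code;
//! code.init(Environment::host());
//!
//! x86::Assembler a(&code);
//! a.mov(x86::eax, 1);
//! a.ret();
//! ```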
class ASMJIT_VIRTAPI BaseAssembler : public BaseEmitter {
public:
ASMJIT_NONCOPYABLE(BaseAssembler)
typedef BaseEmitter Base;
//! Current section where the assembling happens.
Section* _section = nullptr;
//! Start of the CodeBuffer of the current section.
uint8_t* _bufferData = nullptr;
//! End (first invalid byte) of the current section.
uint8_t* _bufferEnd = nullptr;
//! Pointer in the CodeBuffer of the current section.
uint8_t* _bufferPtr = nullptr;
//! \name Construction & Destruction
//! \{
//! Creates a new `BaseAssembler` instance.
ASMJIT_API BaseAssembler() noexcept;
//! Destroys the `BaseAssembler` instance.
ASMJIT_API virtual ~BaseAssembler() noexcept;
//! \}
//! \name Code-Buffer Management
//! \{
//! Returns the capacity of the current CodeBuffer.
inline size_t bufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); }
//! Returns the number of remaining bytes in the current CodeBuffer.
inline size_t remainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); }
//! Returns the current position in the CodeBuffer.
inline size_t offset() const noexcept { return (size_t)(_bufferPtr - _bufferData); }
//! Sets the current position in the CodeBuffer to `offset`.
//!
//! \note The `offset` cannot be greater than the buffer size, even if it's
//! within the buffer's capacity.
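//!
//! A small sketch of saving and restoring the write position (assuming an
//! assembler `a` that is already attached to a \ref CodeHolder):
//!
//! ```
//! size_t saved = a.offset();
//! a.setOffset(0);     // Rewind to the beginning of the section's buffer.
//! // ... patch or re-emit previously generated code ...
//! a.setOffset(saved); // Restore the original write position.
//! ```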
ASMJIT_API Error setOffset(size_t offset);
//! Returns the start of the CodeBuffer in the current section.
inline uint8_t* bufferData() const noexcept { return _bufferData; }
//! Returns the end (first invalid byte) in the current section.
inline uint8_t* bufferEnd() const noexcept { return _bufferEnd; }
//! Returns the current pointer in the CodeBuffer in the current section.
inline uint8_t* bufferPtr() const noexcept { return _bufferPtr; }
//! \}
//! \name Section Management
//! \{
//! Returns the current section.
inline Section* currentSection() const noexcept { return _section; }
ASMJIT_API Error section(Section* section) override;
//! \}
//! \name Label Management
//! \{
ASMJIT_API Label newLabel() override;
ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) override;
ASMJIT_API Error bind(const Label& label) override;
//! \}
//! \name Embed
//! \{
ASMJIT_API Error embed(const void* data, size_t dataSize) override;
ASMJIT_API Error embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount = 1) override;
ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override;
ASMJIT_API Error embedLabel(const Label& label, size_t dataSize = 0) override;
ASMJIT_API Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0) override;
//! \}
//! \name Comment
//! \{
ASMJIT_API Error comment(const char* data, size_t size = SIZE_MAX) override;
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ASSEMBLER_H_INCLUDED

920
deps/asmjit/src/asmjit/core/builder.cpp vendored Normal file

@ -0,0 +1,920 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_BUILDER
#include "../core/builder.h"
#include "../core/emitterutils_p.h"
#include "../core/errorhandler.h"
#include "../core/formatter.h"
#include "../core/logger.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::PostponedErrorHandler (Internal)]
// ============================================================================
//! Postponed error handler that never throws. Used as a temporary error handler
//! to run passes. If an error occurs, the caller is notified and will call the
//! real error handler, which can throw.
class PostponedErrorHandler : public ErrorHandler {
public:
void handleError(Error err, const char* message, BaseEmitter* origin) override {
DebugUtils::unused(err, origin);
_message.assign(message);
}
StringTmp<128> _message;
};
// ============================================================================
// [asmjit::BaseBuilder - Utilities]
// ============================================================================
static void BaseBuilder_deletePasses(BaseBuilder* self) noexcept {
for (Pass* pass : self->_passes)
pass->~Pass();
self->_passes.reset();
}
// ============================================================================
// [asmjit::BaseBuilder - Construction / Destruction]
// ============================================================================
BaseBuilder::BaseBuilder() noexcept
: BaseEmitter(kTypeBuilder),
_codeZone(32768 - Zone::kBlockOverhead),
_dataZone(16384 - Zone::kBlockOverhead),
_passZone(65536 - Zone::kBlockOverhead),
_allocator(&_codeZone) {}
BaseBuilder::~BaseBuilder() noexcept {
BaseBuilder_deletePasses(this);
}
// ============================================================================
// [asmjit::BaseBuilder - Node Management]
// ============================================================================
Error BaseBuilder::_newInstNode(InstNode** out, uint32_t instId, uint32_t instOptions, uint32_t opCount) {
uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity);
InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
if (ASMJIT_UNLIKELY(!node))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
*out = new(node) InstNode(this, instId, instOptions, opCount, opCapacity);
return kErrorOk;
}
Error BaseBuilder::_newLabelNode(LabelNode** out) {
*out = nullptr;
ASMJIT_PROPAGATE(_newNodeT<LabelNode>(out));
return registerLabelNode(*out);
}
Error BaseBuilder::_newAlignNode(AlignNode** out, uint32_t alignMode, uint32_t alignment) {
*out = nullptr;
return _newNodeT<AlignNode>(out, alignMode, alignment);
}
Error BaseBuilder::_newEmbedDataNode(EmbedDataNode** out, uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount) {
*out = nullptr;
uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize());
uint32_t finalTypeId = Type::deabstract(typeId, deabstractDelta);
if (ASMJIT_UNLIKELY(!Type::isValid(finalTypeId)))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
uint32_t typeSize = Type::sizeOf(finalTypeId);
Support::FastUInt8 of = 0;
size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
if (ASMJIT_UNLIKELY(of))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
EmbedDataNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedDataNode>(&node));
node->_embed._typeId = uint8_t(typeId);
node->_embed._typeSize = uint8_t(typeSize);
node->_itemCount = itemCount;
node->_repeatCount = repeatCount;
uint8_t* dstData = node->_inlineData;
if (dataSize > EmbedDataNode::kInlineBufferSize) {
dstData = static_cast<uint8_t*>(_dataZone.alloc(dataSize, 8));
if (ASMJIT_UNLIKELY(!dstData))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
node->_externalData = dstData;
}
if (data)
memcpy(dstData, data, dataSize);
*out = node;
return kErrorOk;
}
Error BaseBuilder::_newConstPoolNode(ConstPoolNode** out) {
*out = nullptr;
ASMJIT_PROPAGATE(_newNodeT<ConstPoolNode>(out));
return registerLabelNode(*out);
}
Error BaseBuilder::_newCommentNode(CommentNode** out, const char* data, size_t size) {
*out = nullptr;
if (data) {
if (size == SIZE_MAX)
size = strlen(data);
if (size > 0) {
data = static_cast<char*>(_dataZone.dup(data, size, true));
if (ASMJIT_UNLIKELY(!data))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
}
return _newNodeT<CommentNode>(out, data);
}
BaseNode* BaseBuilder::addNode(BaseNode* node) noexcept {
ASMJIT_ASSERT(node);
ASMJIT_ASSERT(!node->_prev);
ASMJIT_ASSERT(!node->_next);
ASMJIT_ASSERT(!node->isActive());
if (!_cursor) {
if (!_firstNode) {
_firstNode = node;
_lastNode = node;
}
else {
node->_next = _firstNode;
_firstNode->_prev = node;
_firstNode = node;
}
}
else {
BaseNode* prev = _cursor;
BaseNode* next = _cursor->next();
node->_prev = prev;
node->_next = next;
prev->_next = node;
if (next)
next->_prev = node;
else
_lastNode = node;
}
node->addFlags(BaseNode::kFlagIsActive);
if (node->isSection())
_dirtySectionLinks = true;
_cursor = node;
return node;
}
BaseNode* BaseBuilder::addAfter(BaseNode* node, BaseNode* ref) noexcept {
ASMJIT_ASSERT(node);
ASMJIT_ASSERT(ref);
ASMJIT_ASSERT(!node->_prev);
ASMJIT_ASSERT(!node->_next);
BaseNode* prev = ref;
BaseNode* next = ref->next();
node->_prev = prev;
node->_next = next;
node->addFlags(BaseNode::kFlagIsActive);
if (node->isSection())
_dirtySectionLinks = true;
prev->_next = node;
if (next)
next->_prev = node;
else
_lastNode = node;
return node;
}
BaseNode* BaseBuilder::addBefore(BaseNode* node, BaseNode* ref) noexcept {
ASMJIT_ASSERT(node != nullptr);
ASMJIT_ASSERT(!node->_prev);
ASMJIT_ASSERT(!node->_next);
ASMJIT_ASSERT(!node->isActive());
ASMJIT_ASSERT(ref != nullptr);
ASMJIT_ASSERT(ref->isActive());
BaseNode* prev = ref->prev();
BaseNode* next = ref;
node->_prev = prev;
node->_next = next;
node->addFlags(BaseNode::kFlagIsActive);
if (node->isSection())
_dirtySectionLinks = true;
next->_prev = node;
if (prev)
prev->_next = node;
else
_firstNode = node;
return node;
}
BaseNode* BaseBuilder::removeNode(BaseNode* node) noexcept {
if (!node->isActive())
return node;
BaseNode* prev = node->prev();
BaseNode* next = node->next();
if (_firstNode == node)
_firstNode = next;
else
prev->_next = next;
if (_lastNode == node)
_lastNode = prev;
else
next->_prev = prev;
node->_prev = nullptr;
node->_next = nullptr;
node->clearFlags(BaseNode::kFlagIsActive);
if (node->isSection())
_dirtySectionLinks = true;
if (_cursor == node)
_cursor = prev;
return node;
}
void BaseBuilder::removeNodes(BaseNode* first, BaseNode* last) noexcept {
if (first == last) {
removeNode(first);
return;
}
if (!first->isActive())
return;
BaseNode* prev = first->prev();
BaseNode* next = last->next();
if (_firstNode == first)
_firstNode = next;
else
prev->_next = next;
if (_lastNode == last)
_lastNode = prev;
else
next->_prev = prev;
BaseNode* node = first;
uint32_t didRemoveSection = false;
for (;;) {
next = node->next();
ASMJIT_ASSERT(next != nullptr);
node->_prev = nullptr;
node->_next = nullptr;
node->clearFlags(BaseNode::kFlagIsActive);
didRemoveSection |= uint32_t(node->isSection());
if (_cursor == node)
_cursor = prev;
if (node == last)
break;
node = next;
}
if (didRemoveSection)
_dirtySectionLinks = true;
}
BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept {
BaseNode* old = _cursor;
_cursor = node;
return old;
}
// ============================================================================
// [asmjit::BaseBuilder - Section]
// ============================================================================
Error BaseBuilder::sectionNodeOf(SectionNode** out, uint32_t sectionId) {
*out = nullptr;
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (ASMJIT_UNLIKELY(!_code->isSectionValid(sectionId)))
return reportError(DebugUtils::errored(kErrorInvalidSection));
if (sectionId >= _sectionNodes.size()) {
Error err = _sectionNodes.reserve(&_allocator, sectionId + 1);
if (ASMJIT_UNLIKELY(err != kErrorOk))
return reportError(err);
}
SectionNode* node = nullptr;
if (sectionId < _sectionNodes.size())
node = _sectionNodes[sectionId];
if (!node) {
ASMJIT_PROPAGATE(_newNodeT<SectionNode>(&node, sectionId));
// We have already reserved enough space, so this cannot fail now.
if (sectionId >= _sectionNodes.size())
_sectionNodes.resize(&_allocator, sectionId + 1);
_sectionNodes[sectionId] = node;
}
*out = node;
return kErrorOk;
}
Error BaseBuilder::section(Section* section) {
SectionNode* node;
ASMJIT_PROPAGATE(sectionNodeOf(&node, section->id()));
if (!node->isActive()) {
// Insert the section at the end if it was not part of the code.
addAfter(node, lastNode());
_cursor = node;
}
else {
// This is a bit tricky. We cache section links to make sure that
// switching sections doesn't involve traversal in linked-list unless
// the position of the section has changed.
if (hasDirtySectionLinks())
updateSectionLinks();
if (node->_nextSection)
_cursor = node->_nextSection->_prev;
else
_cursor = _lastNode;
}
return kErrorOk;
}
void BaseBuilder::updateSectionLinks() noexcept {
if (!_dirtySectionLinks)
return;
BaseNode* node_ = _firstNode;
SectionNode* currentSection = nullptr;
while (node_) {
if (node_->isSection()) {
if (currentSection)
currentSection->_nextSection = node_->as<SectionNode>();
currentSection = node_->as<SectionNode>();
}
node_ = node_->next();
}
if (currentSection)
currentSection->_nextSection = nullptr;
_dirtySectionLinks = false;
}
// ============================================================================
// [asmjit::BaseBuilder - Labels]
// ============================================================================
Error BaseBuilder::labelNodeOf(LabelNode** out, uint32_t labelId) {
*out = nullptr;
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
uint32_t index = labelId;
if (ASMJIT_UNLIKELY(index >= _code->labelCount()))
return DebugUtils::errored(kErrorInvalidLabel);
if (index >= _labelNodes.size())
ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, index + 1));
LabelNode* node = _labelNodes[index];
if (!node) {
ASMJIT_PROPAGATE(_newNodeT<LabelNode>(&node, labelId));
_labelNodes[index] = node;
}
*out = node;
return kErrorOk;
}
Error BaseBuilder::registerLabelNode(LabelNode* node) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
LabelEntry* le;
ASMJIT_PROPAGATE(_code->newLabelEntry(&le));
uint32_t labelId = le->id();
// We just added one label so it must be true.
ASMJIT_ASSERT(_labelNodes.size() < labelId + 1);
ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, labelId + 1));
_labelNodes[labelId] = node;
node->_labelId = labelId;
return kErrorOk;
}
static Error BaseBuilder_newLabelInternal(BaseBuilder* self, uint32_t labelId) {
ASMJIT_ASSERT(self->_labelNodes.size() < labelId + 1);
uint32_t growBy = labelId - self->_labelNodes.size();
Error err = self->_labelNodes.willGrow(&self->_allocator, growBy);
if (ASMJIT_UNLIKELY(err))
return self->reportError(err);
LabelNode* node;
ASMJIT_PROPAGATE(self->_newNodeT<LabelNode>(&node, labelId));
self->_labelNodes.resize(&self->_allocator, labelId + 1);
self->_labelNodes[labelId] = node;
node->_labelId = labelId;
return kErrorOk;
}
Label BaseBuilder::newLabel() {
uint32_t labelId = Globals::kInvalidId;
LabelEntry* le;
if (_code &&
_code->newLabelEntry(&le) == kErrorOk &&
BaseBuilder_newLabelInternal(this, le->id()) == kErrorOk) {
labelId = le->id();
}
return Label(labelId);
}
Label BaseBuilder::newNamedLabel(const char* name, size_t nameSize, uint32_t type, uint32_t parentId) {
uint32_t labelId = Globals::kInvalidId;
LabelEntry* le;
if (_code &&
_code->newNamedLabelEntry(&le, name, nameSize, type, parentId) == kErrorOk &&
BaseBuilder_newLabelInternal(this, le->id()) == kErrorOk) {
labelId = le->id();
}
return Label(labelId);
}
Error BaseBuilder::bind(const Label& label) {
LabelNode* node;
ASMJIT_PROPAGATE(labelNodeOf(&node, label));
addNode(node);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Passes]
// ============================================================================
ASMJIT_FAVOR_SIZE Pass* BaseBuilder::passByName(const char* name) const noexcept {
for (Pass* pass : _passes)
if (strcmp(pass->name(), name) == 0)
return pass;
return nullptr;
}
ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (ASMJIT_UNLIKELY(pass == nullptr)) {
// Since this is directly called by `addPassT()` we treat `null` argument
// as out-of-memory condition. Otherwise it would be API misuse.
return DebugUtils::errored(kErrorOutOfMemory);
}
else if (ASMJIT_UNLIKELY(pass->_cb)) {
// Kinda weird, but okay...
if (pass->_cb == this)
return kErrorOk;
return DebugUtils::errored(kErrorInvalidState);
}
ASMJIT_PROPAGATE(_passes.append(&_allocator, pass));
pass->_cb = this;
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error BaseBuilder::deletePass(Pass* pass) noexcept {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (ASMJIT_UNLIKELY(pass == nullptr))
return DebugUtils::errored(kErrorInvalidArgument);
if (pass->_cb != nullptr) {
if (pass->_cb != this)
return DebugUtils::errored(kErrorInvalidState);
uint32_t index = _passes.indexOf(pass);
ASMJIT_ASSERT(index != Globals::kNotFound);
pass->_cb = nullptr;
_passes.removeAt(index);
}
pass->~Pass();
return kErrorOk;
}
Error BaseBuilder::runPasses() {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (_passes.empty())
return kErrorOk;
ErrorHandler* prev = errorHandler();
PostponedErrorHandler postponed;
Error err = kErrorOk;
setErrorHandler(&postponed);
for (Pass* pass : _passes) {
_passZone.reset();
err = pass->run(&_passZone, _logger);
if (err)
break;
}
_passZone.reset();
setErrorHandler(prev);
if (ASMJIT_UNLIKELY(err))
return reportError(err, !postponed._message.empty() ? postponed._message.data() : nullptr);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Emit]
// ============================================================================
Error BaseBuilder::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
uint32_t opCount = EmitterUtils::opCountFromEmitArgs(o0, o1, o2, opExt);
uint32_t options = instOptions() | forcedInstOptions();
if (options & BaseInst::kOptionReserved) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
#ifndef ASMJIT_NO_VALIDATION
// Strict validation.
if (hasValidationOption(kValidationOptionIntermediate)) {
Operand_ opArray[Globals::kMaxOpCount];
EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
Error err = InstAPI::validate(arch(), BaseInst(instId, options, _extraReg), opArray, opCount);
if (ASMJIT_UNLIKELY(err)) {
resetInstOptions();
resetExtraReg();
resetInlineComment();
return reportError(err);
}
}
#endif
// Clear options that should never be part of `InstNode`.
options &= ~BaseInst::kOptionReserved;
}
uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity);
InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
const char* comment = inlineComment();
resetInstOptions();
resetInlineComment();
if (ASMJIT_UNLIKELY(!node)) {
resetExtraReg();
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
node = new(node) InstNode(this, instId, options, opCount, opCapacity);
node->setExtraReg(extraReg());
node->setOp(0, o0);
node->setOp(1, o1);
node->setOp(2, o2);
for (uint32_t i = 3; i < opCount; i++)
node->setOp(i, opExt[i - 3]);
node->resetOpRange(opCount, opCapacity);
if (comment)
node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));
addNode(node);
resetExtraReg();
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Align]
// ============================================================================
Error BaseBuilder::align(uint32_t alignMode, uint32_t alignment) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
AlignNode* node;
ASMJIT_PROPAGATE(_newAlignNode(&node, alignMode, alignment));
addNode(node);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Embed]
// ============================================================================
Error BaseBuilder::embed(const void* data, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
EmbedDataNode* node;
ASMJIT_PROPAGATE(_newEmbedDataNode(&node, Type::kIdU8, data, dataSize));
addNode(node);
return kErrorOk;
}
Error BaseBuilder::embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t itemRepeat) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
EmbedDataNode* node;
ASMJIT_PROPAGATE(_newEmbedDataNode(&node, typeId, data, itemCount, itemRepeat));
addNode(node);
return kErrorOk;
}
Error BaseBuilder::embedConstPool(const Label& label, const ConstPool& pool) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (!isLabelValid(label))
return reportError(DebugUtils::errored(kErrorInvalidLabel));
ASMJIT_PROPAGATE(align(kAlignData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(bind(label));
EmbedDataNode* node;
ASMJIT_PROPAGATE(_newEmbedDataNode(&node, Type::kIdU8, nullptr, pool.size()));
pool.fill(node->data());
addNode(node);
return kErrorOk;
}
// EmbedLabel / EmbedLabelDelta
// ----------------------------
//
// If dataSize is zero it means that the size is the same as the target register
// width; however, if it's provided we want to validate that it's within the
// possible range.
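//
// A minimal usage sketch (hypothetical; `cc` is assumed to be a BaseBuilder-derived
// emitter and `L1`/`L2` labels created by its newLabel()):
//
//   cc.embedLabel(L1);             // Absolute address of L1, register-wide.
//   cc.embedLabelDelta(L1, L2, 4); // (L1 - L2) emitted as a 32-bit value.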
static inline bool BaseBuilder_checkDataSize(size_t dataSize) noexcept {
return !dataSize || (Support::isPowerOf2(dataSize) && dataSize <= 8);
}
Error BaseBuilder::embedLabel(const Label& label, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (!BaseBuilder_checkDataSize(dataSize))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
EmbedLabelNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedLabelNode>(&node, label.id(), uint32_t(dataSize)));
addNode(node);
return kErrorOk;
}
Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (!BaseBuilder_checkDataSize(dataSize))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
EmbedLabelDeltaNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedLabelDeltaNode>(&node, label.id(), base.id(), uint32_t(dataSize)));
addNode(node);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Comment]
// ============================================================================
Error BaseBuilder::comment(const char* data, size_t size) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
CommentNode* node;
ASMJIT_PROPAGATE(_newCommentNode(&node, data, size));
addNode(node);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Serialize]
// ============================================================================
Error BaseBuilder::serializeTo(BaseEmitter* dst) {
Error err = kErrorOk;
BaseNode* node_ = _firstNode;
Operand_ opArray[Globals::kMaxOpCount];
do {
dst->setInlineComment(node_->inlineComment());
if (node_->isInst()) {
InstNode* node = node_->as<InstNode>();
// NOTE: Inlined to remove one additional call per instruction.
dst->setInstOptions(node->instOptions());
dst->setExtraReg(node->extraReg());
const Operand_* op = node->operands();
const Operand_* opExt = EmitterUtils::noExt;
uint32_t opCount = node->opCount();
if (opCount > 3) {
uint32_t i = 4;
opArray[3] = op[3];
while (i < opCount) {
opArray[i].copyFrom(op[i]);
i++;
}
while (i < Globals::kMaxOpCount) {
opArray[i].reset();
i++;
}
opExt = opArray + 3;
}
err = dst->_emit(node->id(), op[0], op[1], op[2], opExt);
}
else if (node_->isLabel()) {
if (node_->isConstPool()) {
ConstPoolNode* node = node_->as<ConstPoolNode>();
err = dst->embedConstPool(node->label(), node->constPool());
}
else {
LabelNode* node = node_->as<LabelNode>();
err = dst->bind(node->label());
}
}
else if (node_->isAlign()) {
AlignNode* node = node_->as<AlignNode>();
err = dst->align(node->alignMode(), node->alignment());
}
else if (node_->isEmbedData()) {
EmbedDataNode* node = node_->as<EmbedDataNode>();
err = dst->embedDataArray(node->typeId(), node->data(), node->itemCount(), node->repeatCount());
}
else if (node_->isEmbedLabel()) {
EmbedLabelNode* node = node_->as<EmbedLabelNode>();
err = dst->embedLabel(node->label(), node->dataSize());
}
else if (node_->isEmbedLabelDelta()) {
EmbedLabelDeltaNode* node = node_->as<EmbedLabelDeltaNode>();
err = dst->embedLabelDelta(node->label(), node->baseLabel(), node->dataSize());
}
else if (node_->isSection()) {
SectionNode* node = node_->as<SectionNode>();
err = dst->section(_code->sectionById(node->id()));
}
else if (node_->isComment()) {
CommentNode* node = node_->as<CommentNode>();
err = dst->comment(node->inlineComment());
}
if (err) break;
node_ = node_->next();
} while (node_);
return err;
}
// ============================================================================
// [asmjit::BaseBuilder - Events]
// ============================================================================
Error BaseBuilder::onAttach(CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
SectionNode* initialSection;
Error err = sectionNodeOf(&initialSection, 0);
if (!err)
err = _passes.willGrow(&_allocator, 8);
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);
return err;
}
_cursor = initialSection;
_firstNode = initialSection;
_lastNode = initialSection;
initialSection->setFlags(BaseNode::kFlagIsActive);
return kErrorOk;
}
Error BaseBuilder::onDetach(CodeHolder* code) noexcept {
BaseBuilder_deletePasses(this);
_sectionNodes.reset();
_labelNodes.reset();
_allocator.reset(&_codeZone);
_codeZone.reset();
_dataZone.reset();
_passZone.reset();
_nodeFlags = 0;
_cursor = nullptr;
_firstNode = nullptr;
_lastNode = nullptr;
return Base::onDetach(code);
}
// ============================================================================
// [asmjit::Pass - Construction / Destruction]
// ============================================================================
Pass::Pass(const char* name) noexcept
: _name(name) {}
Pass::~Pass() noexcept {}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_BUILDER

1435
deps/asmjit/src/asmjit/core/builder.h vendored Normal file

File diff suppressed because it is too large

126
deps/asmjit/src/asmjit/core/codebuffer.h vendored Normal file

@ -0,0 +1,126 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_CODEBUFFER_H_INCLUDED
#define ASMJIT_CORE_CODEBUFFER_H_INCLUDED
#include "../core/globals.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::CodeBuffer]
// ============================================================================
//! Code or data buffer.
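//!
//! A minimal inspection sketch (assumes a populated \ref CodeHolder named `code`;
//! `consume()` is a placeholder for user code):
//!
//! ```
//! CodeBuffer& buf = code.textSection()->buffer();
//! for (uint8_t byte : buf)
//!   consume(byte); // Iterates the emitted bytes via begin()/end().
//! ```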
struct CodeBuffer {
//! The content of the buffer (data).
uint8_t* _data;
//! Number of bytes of `data` used.
size_t _size;
//! Buffer capacity (in bytes).
size_t _capacity;
//! Buffer flags.
uint32_t _flags;
//! Code buffer flags.
enum Flags : uint32_t {
//! Buffer is external (not allocated by asmjit).
kFlagIsExternal = 0x00000001u,
//! Buffer is fixed (cannot be reallocated).
kFlagIsFixed = 0x00000002u
};
//! \name Overloaded Operators
//! \{
//! Returns a reference to the byte at the given `index`.
inline uint8_t& operator[](size_t index) noexcept {
ASMJIT_ASSERT(index < _size);
return _data[index];
}
//! \overload
inline const uint8_t& operator[](size_t index) const noexcept {
ASMJIT_ASSERT(index < _size);
return _data[index];
}
//! \}
//! \name Accessors
//! \{
//! Returns code buffer flags, see \ref Flags.
inline uint32_t flags() const noexcept { return _flags; }
//! Tests whether the code buffer has the given `flag` set.
inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
//! Tests whether this code buffer has a fixed size.
//!
//! Fixed size means that the code buffer is fixed and cannot grow.
inline bool isFixed() const noexcept { return hasFlag(kFlagIsFixed); }
//! Tests whether the data in this code buffer is external.
//!
//! External data can only be provided by users; it's never allocated by AsmJit.
inline bool isExternal() const noexcept { return hasFlag(kFlagIsExternal); }
//! Tests whether the data in this code buffer is allocated (non-null).
inline bool isAllocated() const noexcept { return _data != nullptr; }
//! Tests whether the code buffer is empty.
inline bool empty() const noexcept { return !_size; }
//! Returns the size of the data.
inline size_t size() const noexcept { return _size; }
//! Returns the capacity of the data.
inline size_t capacity() const noexcept { return _capacity; }
//! Returns the pointer to the data the buffer references.
inline uint8_t* data() noexcept { return _data; }
//! \overload
inline const uint8_t* data() const noexcept { return _data; }
//! \}
//! \name Iterators
//! \{
inline uint8_t* begin() noexcept { return _data; }
inline const uint8_t* begin() const noexcept { return _data; }
inline uint8_t* end() noexcept { return _data + _size; }
inline const uint8_t* end() const noexcept { return _data + _size; }
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_CODEBUFFER_H_INCLUDED

1150
deps/asmjit/src/asmjit/core/codeholder.cpp vendored Normal file

File diff suppressed because it is too large

1061
deps/asmjit/src/asmjit/core/codeholder.h vendored Normal file

File diff suppressed because it is too large

151
deps/asmjit/src/asmjit/core/codewriter.cpp vendored Normal file

@ -0,0 +1,151 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/codeholder.h"
#include "../core/codewriter_p.h"
ASMJIT_BEGIN_NAMESPACE
bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept {
uint32_t bitCount = format.immBitCount();
uint32_t bitShift = format.immBitShift();
uint32_t discardLsb = format.immDiscardLsb();
if (!bitCount || bitCount > format.valueSize() * 8u)
return false;
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
return false;
offset64 >>= discardLsb;
}
if (!Support::isInt32(offset64))
return false;
int32_t offset32 = int32_t(offset64);
if (!Support::isEncodableOffset32(offset32, bitCount))
return false;
switch (format.type()) {
case OffsetFormat::kTypeCommon: {
*dst = (uint32_t(offset32) & Support::lsbMask<uint32_t>(bitCount)) << bitShift;
return true;
}
case OffsetFormat::kTypeAArch64_ADR:
case OffsetFormat::kTypeAArch64_ADRP: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 21 || bitShift != 5)
return false;
uint32_t immLo = uint32_t(offset32) & 0x3u;
uint32_t immHi = uint32_t(offset32 >> 2) & Support::lsbMask<uint32_t>(19);
*dst = (immLo << 29) | (immHi << 5);
return true;
}
default:
return false;
}
}
bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept {
uint32_t bitCount = format.immBitCount();
uint32_t discardLsb = format.immDiscardLsb();
if (!bitCount || bitCount > format.valueSize() * 8u)
return false;
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
return false;
offset64 >>= discardLsb;
}
if (!Support::isEncodableOffset64(offset64, bitCount))
return false;
switch (format.type()) {
case OffsetFormat::kTypeCommon: {
*dst = (uint64_t(offset64) & Support::lsbMask<uint64_t>(bitCount)) << format.immBitShift();
return true;
}
default:
return false;
}
}
bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept {
// Offset the destination by ValueOffset so the `dst` points to the
// patched word instead of the beginning of the patched region.
dst = static_cast<char*>(dst) + format.valueOffset();
switch (format.valueSize()) {
case 1: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format))
return false;
Support::writeU8(dst, Support::readU8(dst) | mask);
return true;
}
case 2: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format))
return false;
Support::writeU16uLE(dst, Support::readU16uLE(dst) | mask);
return true;
}
case 4: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format))
return false;
Support::writeU32uLE(dst, Support::readU32uLE(dst) | mask);
return true;
}
case 8: {
uint64_t mask;
if (!encodeOffset64(&mask, offset64, format))
return false;
Support::writeU64uLE(dst, Support::readU64uLE(dst) | mask);
return true;
}
default:
return false;
}
}
ASMJIT_END_NAMESPACE

208
deps/asmjit/src/asmjit/core/codewriter_p.h vendored Normal file

@ -0,0 +1,208 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
#define ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
#include "../core/assembler.h"
#include "../core/codebuffer.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_assembler
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
struct OffsetFormat;
// ============================================================================
// [asmjit::CodeWriter]
// ============================================================================
//! Helper that is used to write into a \ref CodeBuffer held by \ref BaseAssembler.
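//!
//! A minimal sketch of the usual pattern inside an emitter (hypothetical; `this`
//! is assumed to be a \ref BaseAssembler with an attached \ref CodeHolder):
//!
//! ```
//! CodeWriter writer(this);
//! ASMJIT_PROPAGATE(writer.ensureSpace(this, 4));
//! writer.emit32uLE(0x90909090u); // Write 4 bytes, little-endian.
//! writer.done(this);             // Publish the new cursor and buffer size.
//! ```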
class CodeWriter {
public:
uint8_t* _cursor;
ASMJIT_INLINE explicit CodeWriter(BaseAssembler* a) noexcept
: _cursor(a->_bufferPtr) {}
ASMJIT_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept {
size_t remainingSpace = (size_t)(a->_bufferEnd - _cursor);
if (ASMJIT_UNLIKELY(remainingSpace < n)) {
CodeBuffer& buffer = a->_section->_buffer;
Error err = a->_code->growBuffer(&buffer, n);
if (ASMJIT_UNLIKELY(err))
return a->reportError(err);
_cursor = a->_bufferPtr;
}
return kErrorOk;
}
ASMJIT_INLINE uint8_t* cursor() const noexcept { return _cursor; }
ASMJIT_INLINE void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; }
ASMJIT_INLINE void advance(size_t n) noexcept { _cursor += n; }
ASMJIT_INLINE size_t offsetFrom(uint8_t* from) const noexcept {
ASMJIT_ASSERT(_cursor >= from);
return (size_t)(_cursor - from);
}
template<typename T>
ASMJIT_INLINE void emit8(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
_cursor[0] = uint8_t(U(val) & U(0xFF));
_cursor++;
}
template<typename T, typename Y>
ASMJIT_INLINE void emit8If(T val, Y cond) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_ASSERT(size_t(cond) <= 1u);
_cursor[0] = uint8_t(U(val) & U(0xFF));
_cursor += size_t(cond);
}
template<typename T>
ASMJIT_INLINE void emit16uLE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU16uLE(_cursor, uint32_t(U(val) & 0xFFFFu));
_cursor += 2;
}
template<typename T>
ASMJIT_INLINE void emit16uBE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU16uBE(_cursor, uint32_t(U(val) & 0xFFFFu));
_cursor += 2;
}
template<typename T>
ASMJIT_INLINE void emit32uLE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU32uLE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4;
}
template<typename T>
ASMJIT_INLINE void emit32uBE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU32uBE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4;
}
ASMJIT_INLINE void emitData(const void* data, size_t size) noexcept {
ASMJIT_ASSERT(size != 0);
memcpy(_cursor, data, size);
_cursor += size;
}
template<typename T>
ASMJIT_INLINE void emitValueLE(const T& value, size_t size) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_ASSERT(size <= sizeof(T));
U v = U(value);
for (uint32_t i = 0; i < size; i++) {
_cursor[i] = uint8_t(v & 0xFFu);
v >>= 8;
}
_cursor += size;
}
template<typename T>
ASMJIT_INLINE void emitValueBE(const T& value, size_t size) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_ASSERT(size <= sizeof(T));
U v = U(value);
// Emit the low `size` bytes of `value` in big-endian (most significant byte first) order.
for (uint32_t i = 0; i < size; i++)
_cursor[i] = uint8_t((v >> ((size - 1u - i) * 8u)) & 0xFFu);
_cursor += size;
}
ASMJIT_INLINE void emitZeros(size_t size) noexcept {
ASMJIT_ASSERT(size != 0);
memset(_cursor, 0, size);
_cursor += size;
}
ASMJIT_INLINE void remove8(uint8_t* where) noexcept {
ASMJIT_ASSERT(where < _cursor);
uint8_t* p = where;
while (++p != _cursor)
p[-1] = p[0];
_cursor--;
}
template<typename T>
ASMJIT_INLINE void insert8(uint8_t* where, T val) noexcept {
uint8_t* p = _cursor;
while (p != where) {
p[0] = p[-1];
p--;
}
*p = uint8_t(val & 0xFF);
_cursor++;
}
ASMJIT_INLINE void done(BaseAssembler* a) noexcept {
CodeBuffer& buffer = a->_section->_buffer;
size_t newSize = (size_t)(_cursor - a->_bufferData);
ASMJIT_ASSERT(newSize <= buffer.capacity());
a->_bufferPtr = _cursor;
buffer._size = Support::max(buffer._size, newSize);
}
};
// ============================================================================
// [asmjit::CodeWriterUtils]
// ============================================================================
namespace CodeWriterUtils {
bool encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
bool encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
bool writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept;
} // {CodeWriterUtils}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED

628
deps/asmjit/src/asmjit/core/compiler.cpp vendored Normal file

@ -0,0 +1,628 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/assembler.h"
#include "../core/compiler.h"
#include "../core/cpuinfo.h"
#include "../core/logger.h"
#include "../core/rapass_p.h"
#include "../core/rastack_p.h"
#include "../core/support.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::GlobalConstPoolPass]
// ============================================================================
class GlobalConstPoolPass : public Pass {
typedef Pass Base;
ASMJIT_NONCOPYABLE(GlobalConstPoolPass)
GlobalConstPoolPass() noexcept : Pass("GlobalConstPoolPass") {}
Error run(Zone* zone, Logger* logger) override {
DebugUtils::unused(zone, logger);
// Flush the global constant pool.
BaseCompiler* compiler = static_cast<BaseCompiler*>(_cb);
if (compiler->_globalConstPool) {
compiler->addAfter(compiler->_globalConstPool, compiler->lastNode());
compiler->_globalConstPool = nullptr;
}
return kErrorOk;
}
};
// ============================================================================
// [asmjit::BaseCompiler - Construction / Destruction]
// ============================================================================
BaseCompiler::BaseCompiler() noexcept
: BaseBuilder(),
_func(nullptr),
_vRegZone(4096 - Zone::kBlockOverhead),
_vRegArray(),
_localConstPool(nullptr),
_globalConstPool(nullptr) {
_emitterType = uint8_t(kTypeCompiler);
_validationFlags = uint8_t(InstAPI::kValidationFlagVirtRegs);
}
BaseCompiler::~BaseCompiler() noexcept {}
// ============================================================================
// [asmjit::BaseCompiler - Function Management]
// ============================================================================
Error BaseCompiler::_newFuncNode(FuncNode** out, const FuncSignature& signature) {
*out = nullptr;
// Create FuncNode together with all the required surrounding nodes.
FuncNode* funcNode;
ASMJIT_PROPAGATE(_newNodeT<FuncNode>(&funcNode));
ASMJIT_PROPAGATE(_newLabelNode(&funcNode->_exitNode));
ASMJIT_PROPAGATE(_newNodeT<SentinelNode>(&funcNode->_end, SentinelNode::kSentinelFuncEnd));
// Initialize the function's detail info.
Error err = funcNode->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err))
return reportError(err);
// If the Target guarantees greater stack alignment than required by the
// calling convention, override it, as this prevents having to perform
// dynamic stack alignment.
uint32_t environmentStackAlignment = _environment.stackAlignment();
if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment)
funcNode->_funcDetail._callConv.setNaturalStackAlignment(environmentStackAlignment);
// Initialize the function frame.
err = funcNode->_frame.init(funcNode->_funcDetail);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
// Allocate space for function arguments.
funcNode->_args = nullptr;
if (funcNode->argCount() != 0) {
funcNode->_args = _allocator.allocT<FuncNode::ArgPack>(funcNode->argCount() * sizeof(FuncNode::ArgPack));
if (ASMJIT_UNLIKELY(!funcNode->_args))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
memset(funcNode->_args, 0, funcNode->argCount() * sizeof(FuncNode::ArgPack));
}
ASMJIT_PROPAGATE(registerLabelNode(funcNode));
*out = funcNode;
return kErrorOk;
}
Error BaseCompiler::_addFuncNode(FuncNode** out, const FuncSignature& signature) {
ASMJIT_PROPAGATE(_newFuncNode(out, signature));
addFunc(*out);
return kErrorOk;
}
Error BaseCompiler::_newRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
uint32_t opCount = !o1.isNone() ? 2u : !o0.isNone() ? 1u : 0u;
FuncRetNode* node;
ASMJIT_PROPAGATE(_newNodeT<FuncRetNode>(&node));
node->setOpCount(opCount);
node->setOp(0, o0);
node->setOp(1, o1);
node->resetOpRange(2, node->opCapacity());
*out = node;
return kErrorOk;
}
Error BaseCompiler::_addRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
ASMJIT_PROPAGATE(_newRetNode(out, o0, o1));
addNode(*out);
return kErrorOk;
}
FuncNode* BaseCompiler::addFunc(FuncNode* func) {
ASMJIT_ASSERT(_func == nullptr);
_func = func;
addNode(func); // Function node.
BaseNode* prev = cursor(); // {CURSOR}.
addNode(func->exitNode()); // Function exit label.
addNode(func->endNode()); // Function end sentinel.
_setCursor(prev);
return func;
}
Error BaseCompiler::endFunc() {
FuncNode* func = _func;
if (ASMJIT_UNLIKELY(!func))
return reportError(DebugUtils::errored(kErrorInvalidState));
// Add the local constant pool at the end of the function (if exists).
if (_localConstPool) {
setCursor(func->endNode()->prev());
addNode(_localConstPool);
_localConstPool = nullptr;
}
// Mark as finished.
_func = nullptr;
SentinelNode* end = func->endNode();
setCursor(end);
return kErrorOk;
}
Error BaseCompiler::_setArg(size_t argIndex, size_t valueIndex, const BaseReg& r) {
FuncNode* func = _func;
if (ASMJIT_UNLIKELY(!func))
return reportError(DebugUtils::errored(kErrorInvalidState));
if (ASMJIT_UNLIKELY(!isVirtRegValid(r)))
return reportError(DebugUtils::errored(kErrorInvalidVirtId));
VirtReg* vReg = virtRegByReg(r);
func->setArg(argIndex, valueIndex, vReg);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseCompiler - Function Invocation]
// ============================================================================
Error BaseCompiler::_newInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
InvokeNode* node;
ASMJIT_PROPAGATE(_newNodeT<InvokeNode>(&node, instId, 0u));
node->setOpCount(1);
node->setOp(0, o0);
node->resetOpRange(1, node->opCapacity());
Error err = node->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err))
return reportError(err);
// Skip the allocation if there are no arguments.
uint32_t argCount = signature.argCount();
if (argCount) {
node->_args = static_cast<InvokeNode::OperandPack*>(_allocator.alloc(argCount * sizeof(InvokeNode::OperandPack)));
if (!node->_args)
return reportError(DebugUtils::errored(kErrorOutOfMemory));
memset(node->_args, 0, argCount * sizeof(InvokeNode::OperandPack));
}
*out = node;
return kErrorOk;
}
Error BaseCompiler::_addInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
ASMJIT_PROPAGATE(_newInvokeNode(out, instId, o0, signature));
addNode(*out);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseCompiler - Virtual Registers]
// ============================================================================
static void BaseCompiler_assignGenericName(BaseCompiler* self, VirtReg* vReg) {
uint32_t index = unsigned(Operand::virtIdToIndex(vReg->_id));
char buf[64];
int size = snprintf(buf, ASMJIT_ARRAY_SIZE(buf), "%%%u", unsigned(index));
ASMJIT_ASSERT(size > 0 && size < int(ASMJIT_ARRAY_SIZE(buf)));
vReg->_name.setData(&self->_dataZone, buf, unsigned(size));
}
Error BaseCompiler::newVirtReg(VirtReg** out, uint32_t typeId, uint32_t signature, const char* name) {
*out = nullptr;
uint32_t index = _vRegArray.size();
if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount)))
return reportError(DebugUtils::errored(kErrorTooManyVirtRegs));
if (ASMJIT_UNLIKELY(_vRegArray.willGrow(&_allocator) != kErrorOk))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
VirtReg* vReg = _vRegZone.allocZeroedT<VirtReg>();
if (ASMJIT_UNLIKELY(!vReg))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
uint32_t size = Type::sizeOf(typeId);
uint32_t alignment = Support::min<uint32_t>(size, 64);
vReg = new(vReg) VirtReg(Operand::indexToVirtId(index), signature, size, alignment, typeId);
#ifndef ASMJIT_NO_LOGGING
if (name && name[0] != '\0')
vReg->_name.setData(&_dataZone, name, SIZE_MAX);
else
BaseCompiler_assignGenericName(this, vReg);
#else
DebugUtils::unused(name);
#endif
_vRegArray.appendUnsafe(vReg);
*out = vReg;
return kErrorOk;
}
Error BaseCompiler::_newReg(BaseReg* out, uint32_t typeId, const char* name) {
RegInfo regInfo;
out->reset();
Error err = ArchUtils::typeIdToRegInfo(arch(), typeId, &typeId, &regInfo);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regInfo.signature(), name));
out->_initReg(regInfo.signature(), vReg->id());
return kErrorOk;
}
Error BaseCompiler::_newRegFmt(BaseReg* out, uint32_t typeId, const char* fmt, ...) {
va_list ap;
StringTmp<256> sb;
va_start(ap, fmt);
sb.appendVFormat(fmt, ap);
va_end(ap);
return _newReg(out, typeId, sb.data());
}
Error BaseCompiler::_newReg(BaseReg* out, const BaseReg& ref, const char* name) {
out->reset();
RegInfo regInfo;
uint32_t typeId;
if (isVirtRegValid(ref)) {
VirtReg* vRef = virtRegByReg(ref);
typeId = vRef->typeId();
// NOTE: It's possible to cast one register type to another if it's the
// same register group. However, VirtReg always contains the TypeId that
// was used to create the register. This means that in some cases we may
// end up having different size of `ref` and `vRef`. In such case we
// adjust the TypeId to match the `ref` register type instead of the
// original register type, which should be the expected behavior.
uint32_t typeSize = Type::sizeOf(typeId);
uint32_t refSize = ref.size();
if (typeSize != refSize) {
if (Type::isInt(typeId)) {
// GP register - change TypeId to match `ref`, but keep sign of `vRef`.
switch (refSize) {
case 1: typeId = Type::kIdI8 | (typeId & 1); break;
case 2: typeId = Type::kIdI16 | (typeId & 1); break;
case 4: typeId = Type::kIdI32 | (typeId & 1); break;
case 8: typeId = Type::kIdI64 | (typeId & 1); break;
default: typeId = Type::kIdVoid; break;
}
}
else if (Type::isMmx(typeId)) {
// MMX register - always use 64-bit.
typeId = Type::kIdMmx64;
}
else if (Type::isMask(typeId)) {
// Mask register - change TypeId to match `ref` size.
switch (refSize) {
case 1: typeId = Type::kIdMask8; break;
case 2: typeId = Type::kIdMask16; break;
case 4: typeId = Type::kIdMask32; break;
case 8: typeId = Type::kIdMask64; break;
default: typeId = Type::kIdVoid; break;
}
}
else {
// VEC register - change TypeId to match `ref` size, keep vector metadata.
uint32_t elementTypeId = Type::baseOf(typeId);
switch (refSize) {
case 16: typeId = Type::_kIdVec128Start + (elementTypeId - Type::kIdI8); break;
case 32: typeId = Type::_kIdVec256Start + (elementTypeId - Type::kIdI8); break;
case 64: typeId = Type::_kIdVec512Start + (elementTypeId - Type::kIdI8); break;
default: typeId = Type::kIdVoid; break;
}
}
if (typeId == Type::kIdVoid)
return reportError(DebugUtils::errored(kErrorInvalidState));
}
}
else {
typeId = ref.type();
}
Error err = ArchUtils::typeIdToRegInfo(arch(), typeId, &typeId, &regInfo);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regInfo.signature(), name));
out->_initReg(regInfo.signature(), vReg->id());
return kErrorOk;
}
Error BaseCompiler::_newRegFmt(BaseReg* out, const BaseReg& ref, const char* fmt, ...) {
va_list ap;
StringTmp<256> sb;
va_start(ap, fmt);
sb.appendVFormat(fmt, ap);
va_end(ap);
return _newReg(out, ref, sb.data());
}
Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name) {
out->reset();
if (size == 0)
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (alignment == 0)
alignment = 1;
if (!Support::isPowerOf2(alignment))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (alignment > 64)
alignment = 64;
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, 0, 0, name));
vReg->_virtSize = size;
vReg->_isStack = true;
vReg->_alignment = uint8_t(alignment);
// Set the memory operand to GPD/GPQ and its id to VirtReg.
*out = BaseMem(BaseMem::Decomposed { _gpRegInfo.type(), vReg->id(), BaseReg::kTypeNone, 0, 0, 0, BaseMem::kSignatureMemRegHomeFlag });
return kErrorOk;
}
Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment) {
if (!isVirtIdValid(virtId))
return DebugUtils::errored(kErrorInvalidVirtId);
if (newAlignment && !Support::isPowerOf2(newAlignment))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (newAlignment > 64)
newAlignment = 64;
VirtReg* vReg = virtRegById(virtId);
if (newSize)
vReg->_virtSize = newSize;
if (newAlignment)
vReg->_alignment = uint8_t(newAlignment);
// This is required if the RAPass is already running. There is a chance that
// a stack-slot has already been allocated, and in that case it has to be
// updated as well; otherwise we would allocate the wrong amount of memory.
RAWorkReg* workReg = vReg->_workReg;
if (workReg && workReg->_stackSlot) {
workReg->_stackSlot->_size = vReg->_virtSize;
workReg->_stackSlot->_alignment = vReg->_alignment;
}
return kErrorOk;
}
Error BaseCompiler::_newConst(BaseMem* out, uint32_t scope, const void* data, size_t size) {
out->reset();
ConstPoolNode** pPool;
if (scope == ConstPool::kScopeLocal)
pPool = &_localConstPool;
else if (scope == ConstPool::kScopeGlobal)
pPool = &_globalConstPool;
else
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (!*pPool)
ASMJIT_PROPAGATE(_newConstPoolNode(pPool));
ConstPoolNode* pool = *pPool;
size_t off;
Error err = pool->add(data, size, off);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
*out = BaseMem(BaseMem::Decomposed {
Label::kLabelTag, // Base type.
pool->labelId(), // Base id.
0, // Index type.
0, // Index id.
int32_t(off), // Offset.
uint32_t(size), // Size.
0 // Flags.
});
return kErrorOk;
}
void BaseCompiler::rename(const BaseReg& reg, const char* fmt, ...) {
if (!reg.isVirtReg()) return;
VirtReg* vReg = virtRegById(reg.id());
if (!vReg) return;
if (fmt && fmt[0] != '\0') {
char buf[128];
va_list ap;
va_start(ap, fmt);
vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
va_end(ap);
vReg->_name.setData(&_dataZone, buf, SIZE_MAX);
}
else {
BaseCompiler_assignGenericName(this, vReg);
}
}
// ============================================================================
// [asmjit::BaseCompiler - Jump Annotations]
// ============================================================================
Error BaseCompiler::newJumpNode(JumpNode** out, uint32_t instId, uint32_t instOptions, const Operand_& o0, JumpAnnotation* annotation) {
JumpNode* node = _allocator.allocT<JumpNode>();
uint32_t opCount = 1;
*out = node;
if (ASMJIT_UNLIKELY(!node))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
node = new(node) JumpNode(this, instId, instOptions, opCount, annotation);
node->setOp(0, o0);
node->resetOpRange(opCount, JumpNode::kBaseOpCapacity);
return kErrorOk;
}
Error BaseCompiler::emitAnnotatedJump(uint32_t instId, const Operand_& o0, JumpAnnotation* annotation) {
uint32_t options = instOptions() | forcedInstOptions();
RegOnly extra = extraReg();
const char* comment = inlineComment();
resetInstOptions();
resetInlineComment();
resetExtraReg();
JumpNode* node;
ASMJIT_PROPAGATE(newJumpNode(&node, instId, options, o0, annotation));
node->setExtraReg(extra);
if (comment)
node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));
addNode(node);
return kErrorOk;
}
JumpAnnotation* BaseCompiler::newJumpAnnotation() {
if (_jumpAnnotations.grow(&_allocator, 1) != kErrorOk) {
reportError(DebugUtils::errored(kErrorOutOfMemory));
return nullptr;
}
uint32_t id = _jumpAnnotations.size();
JumpAnnotation* jumpAnnotation = _allocator.newT<JumpAnnotation>(this, id);
if (!jumpAnnotation) {
reportError(DebugUtils::errored(kErrorOutOfMemory));
return nullptr;
}
_jumpAnnotations.appendUnsafe(jumpAnnotation);
return jumpAnnotation;
}
// ============================================================================
// [asmjit::BaseCompiler - Events]
// ============================================================================
Error BaseCompiler::onAttach(CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
const ArchTraits& archTraits = ArchTraits::byArch(code->arch());
uint32_t nativeRegType = Environment::is32Bit(code->arch()) ? BaseReg::kTypeGp32 : BaseReg::kTypeGp64;
_gpRegInfo.setSignature(archTraits.regTypeToSignature(nativeRegType));
Error err = addPassT<GlobalConstPoolPass>();
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);
return err;
}
return kErrorOk;
}
Error BaseCompiler::onDetach(CodeHolder* code) noexcept {
_func = nullptr;
_localConstPool = nullptr;
_globalConstPool = nullptr;
_vRegArray.reset();
_vRegZone.reset();
return Base::onDetach(code);
}
// ============================================================================
// [asmjit::FuncPass - Construction / Destruction]
// ============================================================================
FuncPass::FuncPass(const char* name) noexcept
: Pass(name) {}
// ============================================================================
// [asmjit::FuncPass - Run]
// ============================================================================
Error FuncPass::run(Zone* zone, Logger* logger) {
BaseNode* node = cb()->firstNode();
if (!node) return kErrorOk;
do {
if (node->type() == BaseNode::kNodeFunc) {
FuncNode* func = node->as<FuncNode>();
node = func->endNode();
ASMJIT_PROPAGATE(runOnFunction(zone, logger, func));
}
// Find a function by skipping all nodes that are not `kNodeFunc`.
do {
node = node->next();
} while (node && node->type() != BaseNode::kNodeFunc);
} while (node);
return kErrorOk;
}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER

763
deps/asmjit/src/asmjit/core/compiler.h vendored Normal file
View File

@ -0,0 +1,763 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_COMPILER_H_INCLUDED
#define ASMJIT_CORE_COMPILER_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/assembler.h"
#include "../core/builder.h"
#include "../core/constpool.h"
#include "../core/compilerdefs.h"
#include "../core/func.h"
#include "../core/inst.h"
#include "../core/operand.h"
#include "../core/support.h"
#include "../core/zone.h"
#include "../core/zonevector.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [Forward Declarations]
// ============================================================================
class JumpAnnotation;
class JumpNode;
class FuncNode;
class FuncRetNode;
class InvokeNode;
//! \addtogroup asmjit_compiler
//! \{
// ============================================================================
// [asmjit::BaseCompiler]
// ============================================================================
//! Code emitter that uses virtual registers and performs register allocation.
//!
//! Compiler is a high-level code-generation tool that provides register
//! allocation and automatic handling of function calling conventions. It was
//! primarily designed for merging multiple parts of code into a function
//! without worrying about registers and function calling conventions.
//!
//! BaseCompiler can be used, with a minimum effort, to handle 32-bit and
//! 64-bit code generation within a single code base.
//!
//! BaseCompiler is based on BaseBuilder and contains all the features it
//! provides. It means that the code it stores can be modified (removed, added,
//! injected) and analyzed. When the code is finalized the compiler can emit
//! the code into an Assembler to translate the abstract representation into
//! machine code.
//!
//! Check out architecture specific compilers for more details and examples:
//!
//! - \ref x86::Compiler - X86/X64 compiler implementation.
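//!
//! A minimal end-to-end sketch (assuming the X86 backend; `JitRuntime`,
//! `x86::Compiler` and `FuncSignatureT` come from other AsmJit headers and
//! error checking is omitted):
//!
//! \code
//! using namespace asmjit;
//!
//! JitRuntime rt;                           // Owns the executable memory.
//! CodeHolder code;
//! code.init(rt.environment());             // Match the host environment.
//!
//! x86::Compiler cc(&code);                 // Architecture-specific compiler.
//! cc.addFunc(FuncSignatureT<int>());       // Begin `int fn(void)`.
//!
//! x86::Gp result = cc.newInt32("result");  // Virtual register, allocated by RAPass.
//! cc.mov(result, 42);
//! cc.ret(result);
//! cc.endFunc();                            // End of the function body.
//! cc.finalize();                           // Run passes and serialize to an Assembler.
//!
//! int (*fn)(void);
//! rt.add(&fn, &code);                      // Relocate and make executable.
//! \endcode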
class ASMJIT_VIRTAPI BaseCompiler : public BaseBuilder {
public:
ASMJIT_NONCOPYABLE(BaseCompiler)
typedef BaseBuilder Base;
//! Current function.
FuncNode* _func;
//! Allocates `VirtReg` objects.
Zone _vRegZone;
//! Stores array of `VirtReg` pointers.
ZoneVector<VirtReg*> _vRegArray;
//! Stores jump annotations.
ZoneVector<JumpAnnotation*> _jumpAnnotations;
//! Local constant pool, flushed at the end of each function.
ConstPoolNode* _localConstPool;
//! Global constant pool, flushed by `finalize()`.
ConstPoolNode* _globalConstPool;
//! \name Construction & Destruction
//! \{
//! Creates a new `BaseCompiler` instance.
ASMJIT_API BaseCompiler() noexcept;
//! Destroys the `BaseCompiler` instance.
ASMJIT_API virtual ~BaseCompiler() noexcept;
//! \}
//! \name Function Management
//! \{
//! Returns the current function.
inline FuncNode* func() const noexcept { return _func; }
//! Creates a new \ref FuncNode.
ASMJIT_API Error _newFuncNode(FuncNode** out, const FuncSignature& signature);
//! Creates a new \ref FuncNode and adds it to the compiler.
ASMJIT_API Error _addFuncNode(FuncNode** out, const FuncSignature& signature);
//! Creates a new \ref FuncRetNode.
ASMJIT_API Error _newRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1);
//! Creates a new \ref FuncRetNode and adds it to the compiler.
ASMJIT_API Error _addRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1);
//! Creates a new \ref FuncNode with the given `signature` and returns it.
inline FuncNode* newFunc(const FuncSignature& signature) {
FuncNode* node;
_newFuncNode(&node, signature);
return node;
}
//! Creates a new \ref FuncNode with the given `signature`, adds it to the
//! compiler by using the \ref addFunc(FuncNode*) overload, and returns it.
inline FuncNode* addFunc(const FuncSignature& signature) {
FuncNode* node;
_addFuncNode(&node, signature);
return node;
}
//! Adds a function `node` to the instruction stream.
ASMJIT_API FuncNode* addFunc(FuncNode* func);
//! Emits a sentinel that marks the end of the current function.
ASMJIT_API Error endFunc();
ASMJIT_API Error _setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg);
//! Sets a function argument at `argIndex` to `reg`.
inline Error setArg(size_t argIndex, const BaseReg& reg) { return _setArg(argIndex, 0, reg); }
//! Sets a function argument at `argIndex` at `valueIndex` to `reg`.
inline Error setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg) { return _setArg(argIndex, valueIndex, reg); }
inline FuncRetNode* newRet(const Operand_& o0, const Operand_& o1) {
FuncRetNode* node;
_newRetNode(&node, o0, o1);
return node;
}
inline FuncRetNode* addRet(const Operand_& o0, const Operand_& o1) {
FuncRetNode* node;
_addRetNode(&node, o0, o1);
return node;
}
//! \}
//! \name Function Invocation
//! \{
//! Creates a new \ref InvokeNode.
ASMJIT_API Error _newInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature);
//! Creates a new \ref InvokeNode and adds it to Compiler.
ASMJIT_API Error _addInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature);
//! Creates a new `InvokeNode`.
inline InvokeNode* newCall(uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
InvokeNode* node;
_newInvokeNode(&node, instId, o0, signature);
return node;
}
//! Adds a new `InvokeNode`.
inline InvokeNode* addCall(uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
InvokeNode* node;
_addInvokeNode(&node, instId, o0, signature);
return node;
}
//! \}
//! \name Virtual Registers
//! \{
//! Creates a new virtual register representing the given `typeId` and `signature`.
//!
//! \note This function is public, but it's not generally recommended for AsmJit
//! users; use the architecture-specific `newReg()` functionality instead, or
//! functions like \ref _newReg() and \ref _newRegFmt().
ASMJIT_API Error newVirtReg(VirtReg** out, uint32_t typeId, uint32_t signature, const char* name);
//! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
ASMJIT_API Error _newReg(BaseReg* out, uint32_t typeId, const char* name = nullptr);
//! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
//!
//! \note This version accepts a snprintf() format `fmt` followed by variadic arguments.
ASMJIT_API Error _newRegFmt(BaseReg* out, uint32_t typeId, const char* fmt, ...);
//! Creates a new virtual register compatible with the provided reference register `ref`.
ASMJIT_API Error _newReg(BaseReg* out, const BaseReg& ref, const char* name = nullptr);
//! Creates a new virtual register compatible with the provided reference register `ref`.
//!
//! \note This version accepts a snprintf() format `fmt` followed by variadic arguments.
ASMJIT_API Error _newRegFmt(BaseReg* out, const BaseReg& ref, const char* fmt, ...);
//! Tests whether the given `id` is a valid virtual register id.
inline bool isVirtIdValid(uint32_t id) const noexcept {
uint32_t index = Operand::virtIdToIndex(id);
return index < _vRegArray.size();
}
//! Tests whether the given `reg` is a virtual register having a valid id.
inline bool isVirtRegValid(const BaseReg& reg) const noexcept {
return isVirtIdValid(reg.id());
}
//! Returns \ref VirtReg associated with the given `id`.
inline VirtReg* virtRegById(uint32_t id) const noexcept {
ASMJIT_ASSERT(isVirtIdValid(id));
return _vRegArray[Operand::virtIdToIndex(id)];
}
//! Returns \ref VirtReg associated with the given `reg`.
inline VirtReg* virtRegByReg(const BaseReg& reg) const noexcept { return virtRegById(reg.id()); }
//! Returns \ref VirtReg associated with the given virtual register `index`.
//!
//! \note This is not the same as virtual register id. The conversion between
//! id and its index is implemented by \ref Operand_::virtIdToIndex() and \ref
//! Operand_::indexToVirtId() functions.
inline VirtReg* virtRegByIndex(uint32_t index) const noexcept { return _vRegArray[index]; }
//! Returns an array of all virtual registers managed by the Compiler.
inline const ZoneVector<VirtReg*>& virtRegs() const noexcept { return _vRegArray; }
//! \}
//! \name Stack
//! \{
//! Creates a new stack of the given `size` and `alignment` and stores it to `out`.
//!
//! \note `name` can be used to give the stack a name, for debugging purposes.
ASMJIT_API Error _newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name = nullptr);
//! Updates the stack size of a stack created by `_newStack()` by its `virtId`.
ASMJIT_API Error setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment = 0);
//! Updates the stack size of a stack created by `_newStack()`.
inline Error setStackSize(const BaseMem& mem, uint32_t newSize, uint32_t newAlignment = 0) {
return setStackSize(mem.id(), newSize, newAlignment);
}
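//! A minimal sketch of creating and addressing a stack area (assuming the X86
//! backend, where `newStack()` wraps \ref _newStack(); `cc` is an x86::Compiler):
//!
//! \code
//! x86::Mem scratch = cc.newStack(64, 16);  // 64 bytes, 16-byte aligned home slot.
//! x86::Gp ptr = cc.newIntPtr("ptr");
//! cc.lea(ptr, scratch);                    // Materialize the address of the slot.
//! cc.mov(x86::dword_ptr(ptr), 0);          // Use it like ordinary memory.
//! \endcode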
//! \}
//! \name Constants
//! \{
//! Creates a new constant of the given `scope` (see \ref ConstPool::Scope).
//!
//! This function adds a constant of the given `size` to the built-in \ref
//! ConstPool and stores the reference to that constant to the `out` operand.
ASMJIT_API Error _newConst(BaseMem* out, uint32_t scope, const void* data, size_t size);
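//! A sketch of embedding a constant (assuming the X86 backend, which is assumed
//! to wrap \ref _newConst() with typed helpers such as `newDoubleConst()`;
//! `cc` is an x86::Compiler):
//!
//! \code
//! x86::Xmm tmp = cc.newXmmSd("tmp");
//! x86::Mem pi = cc.newDoubleConst(ConstPool::kScopeLocal, 3.14159265358979);
//! cc.movsd(tmp, pi);                       // Load the pooled constant.
//! \endcode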
//! \}
//! \name Miscellaneous
//! \{
//! Rename the given virtual register `reg` to a formatted string `fmt`.
ASMJIT_API void rename(const BaseReg& reg, const char* fmt, ...);
//! \}
//! \name Jump Annotations
//! \{
inline const ZoneVector<JumpAnnotation*>& jumpAnnotations() const noexcept {
return _jumpAnnotations;
}
ASMJIT_API Error newJumpNode(JumpNode** out, uint32_t instId, uint32_t instOptions, const Operand_& o0, JumpAnnotation* annotation);
ASMJIT_API Error emitAnnotatedJump(uint32_t instId, const Operand_& o0, JumpAnnotation* annotation);
//! Returns a new `JumpAnnotation` instance, which can be used to aggregate
//! possible targets of a jump where the target is not a label, for example
//! to implement jump tables.
ASMJIT_API JumpAnnotation* newJumpAnnotation();
//! \}
#ifndef ASMJIT_NO_DEPRECATED
ASMJIT_DEPRECATED("alloc() has no effect, it will be removed in the future")
inline void alloc(BaseReg&) {}
ASMJIT_DEPRECATED("spill() has no effect, it will be removed in the future")
inline void spill(BaseReg&) {}
#endif // !ASMJIT_NO_DEPRECATED
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
//! \}
};
// ============================================================================
// [asmjit::JumpAnnotation]
// ============================================================================
//! Jump annotation used to annotate jumps.
//!
//! \ref BaseCompiler allows emitting jumps where the target is either a register
//! or a memory operand. Such jumps cannot be trivially inspected, so instead of
//! relying on heuristics AsmJit allows annotating such jumps with their possible
//! targets. The register allocator then uses the annotation to construct the
//! control flow graph, which is then used by liveness analysis and other tools
//! to prepare the ground for register allocation.
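//!
//! A sketch of annotating an indirect jump (assuming the X86 backend, which is
//! assumed to provide a `jmp()` overload taking a JumpAnnotation; `cc`,
//! `target`, `caseA`, and `caseB` are hypothetical):
//!
//! \code
//! JumpAnnotation* annotation = cc.newJumpAnnotation();
//! annotation->addLabel(caseA);             // Register every possible target...
//! annotation->addLabel(caseB);
//! cc.jmp(target, annotation);              // ...and attach them to the indirect jump.
//! \endcode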
class JumpAnnotation {
public:
ASMJIT_NONCOPYABLE(JumpAnnotation)
//! Compiler that owns this JumpAnnotation.
BaseCompiler* _compiler;
//! Annotation identifier.
uint32_t _annotationId;
//! Vector of label identifiers, see \ref labelIds().
ZoneVector<uint32_t> _labelIds;
inline JumpAnnotation(BaseCompiler* compiler, uint32_t annotationId) noexcept
: _compiler(compiler),
_annotationId(annotationId) {}
//! Returns the compiler that owns this JumpAnnotation.
inline BaseCompiler* compiler() const noexcept { return _compiler; }
//! Returns the annotation id.
inline uint32_t annotationId() const noexcept { return _annotationId; }
//! Returns a vector of label identifiers that lists all targets of the jump.
const ZoneVector<uint32_t>& labelIds() const noexcept { return _labelIds; }
//! Tests whether the given `label` is a target of this JumpAnnotation.
inline bool hasLabel(const Label& label) const noexcept { return hasLabelId(label.id()); }
//! Tests whether the given `labelId` is a target of this JumpAnnotation.
inline bool hasLabelId(uint32_t labelId) const noexcept { return _labelIds.contains(labelId); }
//! Adds the `label` to the list of targets of this JumpAnnotation.
inline Error addLabel(const Label& label) noexcept { return addLabelId(label.id()); }
//! Adds the `labelId` to the list of targets of this JumpAnnotation.
inline Error addLabelId(uint32_t labelId) noexcept { return _labelIds.append(&_compiler->_allocator, labelId); }
};
// ============================================================================
// [asmjit::JumpNode]
// ============================================================================
//! Jump instruction with \ref JumpAnnotation.
//!
//! \note This node should only be used to represent a jump whose target cannot
//! be deduced by examining the instruction operands, for example when the jump
//! target is a register or a memory location. This pattern is often used to
//! perform indirect jumps through a jump table, e.g. to implement a `switch`
//! statement.
class JumpNode : public InstNode {
public:
ASMJIT_NONCOPYABLE(JumpNode)
JumpAnnotation* _annotation;
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE JumpNode(BaseCompiler* cc, uint32_t instId, uint32_t options, uint32_t opCount, JumpAnnotation* annotation) noexcept
: InstNode(cc, instId, options, opCount, kBaseOpCapacity),
_annotation(annotation) {
setType(kNodeJump);
}
//! \}
//! \name Accessors
//! \{
//! Tests whether this JumpNode has associated a \ref JumpAnnotation.
inline bool hasAnnotation() const noexcept { return _annotation != nullptr; }
//! Returns the \ref JumpAnnotation associated with this jump, or `nullptr`.
inline JumpAnnotation* annotation() const noexcept { return _annotation; }
//! Sets the \ref JumpAnnotation associated with this jump to `annotation`.
inline void setAnnotation(JumpAnnotation* annotation) noexcept { _annotation = annotation; }
//! \}
};
// ============================================================================
// [asmjit::FuncNode]
// ============================================================================
//! Function node represents a function used by \ref BaseCompiler.
//!
//! A function is composed of the following:
//!
//! - Function entry, \ref FuncNode acts as a label, so the entry is implicit.
//! To get the entry, simply use \ref FuncNode::label(), which is the same
//! as \ref LabelNode::label().
//!
//! - Function exit, which is represented by \ref FuncNode::exitNode(). A
//! helper function \ref FuncNode::exitLabel() exists and returns an exit
//! label instead of node.
//!
//! - Function \ref FuncNode::endNode() sentinel. This node marks the end of
//! a function - there should be no code that belongs to the function after
//! this node, but the Compiler doesn't enforce that at the moment.
//!
//! - Function detail, see \ref FuncNode::detail().
//!
//! - Function frame, see \ref FuncNode::frame().
//!
//! - Function arguments mapped to virtual registers, see \ref FuncNode::args().
//!
//! In a node list, the function and its body looks like the following:
//!
//! \code{.unparsed}
//! [...] - Anything before the function.
//!
//! [FuncNode] - Entry point of the function, acts as a label as well.
//! <Prolog> - Prolog inserted by the register allocator.
//! {...} - Function body - user code basically.
//! [ExitLabel] - Exit label
//! <Epilog> - Epilog inserted by the register allocator.
//! <Return> - Return inserted by the register allocator.
//! {...} - Can contain data or user code (error handling, special cases, ...).
//! [FuncEnd] - End sentinel
//!
//! [...] - Anything after the function.
//! \endcode
//!
//! When a function is added to the compiler by \ref BaseCompiler::addFunc() it
//! actually inserts 3 nodes (FuncNode, ExitLabel, and FuncEnd) and sets the
//! current cursor to FuncNode. When \ref BaseCompiler::endFunc() is called the
//! cursor is set to FuncEnd. This guarantees that the user can use ExitLabel as
//! a marker after which additional code or data can be placed, which is a
//! common practice.
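//!
//! A sketch of mapping function arguments to virtual registers (assuming the
//! X86 backend; `cc` is an x86::Compiler):
//!
//! \code
//! FuncNode* funcNode = cc.addFunc(FuncSignatureT<int, int, int>());
//! x86::Gp a = cc.newInt32("a");
//! x86::Gp b = cc.newInt32("b");
//! cc.setArg(0, a);                         // Bind argument 0 to virtual register `a`.
//! cc.setArg(1, b);
//! cc.add(a, b);
//! cc.ret(a);
//! cc.endFunc();
//! \endcode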
class FuncNode : public LabelNode {
public:
ASMJIT_NONCOPYABLE(FuncNode)
//! Arguments pack.
struct ArgPack {
VirtReg* _data[Globals::kMaxValuePack];
inline void reset() noexcept {
for (size_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++)
_data[valueIndex] = nullptr;
}
inline VirtReg*& operator[](size_t valueIndex) noexcept { return _data[valueIndex]; }
inline VirtReg* const& operator[](size_t valueIndex) const noexcept { return _data[valueIndex]; }
};
//! Function detail.
FuncDetail _funcDetail;
//! Function frame.
FuncFrame _frame;
//! Function exit label.
LabelNode* _exitNode;
//! Function end (sentinel).
SentinelNode* _end;
//! Argument packs.
ArgPack* _args;
//! \name Construction & Destruction
//! \{
//! Creates a new `FuncNode` instance.
//!
//! Always use `BaseCompiler::addFunc()` to create `FuncNode`.
ASMJIT_INLINE FuncNode(BaseBuilder* cb) noexcept
: LabelNode(cb),
_funcDetail(),
_frame(),
_exitNode(nullptr),
_end(nullptr),
_args(nullptr) {
setType(kNodeFunc);
}
//! \}
//! \name Accessors
//! \{
//! Returns function exit `LabelNode`.
inline LabelNode* exitNode() const noexcept { return _exitNode; }
//! Returns function exit label.
inline Label exitLabel() const noexcept { return _exitNode->label(); }
//! Returns "End of Func" sentinel.
inline SentinelNode* endNode() const noexcept { return _end; }
//! Returns function declaration.
inline FuncDetail& detail() noexcept { return _funcDetail; }
//! Returns function declaration.
inline const FuncDetail& detail() const noexcept { return _funcDetail; }
//! Returns function frame.
inline FuncFrame& frame() noexcept { return _frame; }
//! Returns function frame.
inline const FuncFrame& frame() const noexcept { return _frame; }
//! Tests whether the function has a return value.
inline bool hasRet() const noexcept { return _funcDetail.hasRet(); }
//! Returns arguments count.
inline uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
//! Returns argument packs.
inline ArgPack* argPacks() const noexcept { return _args; }
//! Returns argument pack at `argIndex`.
inline ArgPack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
}
//! Sets argument at `argIndex`.
inline void setArg(size_t argIndex, VirtReg* vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][0] = vReg;
}
//! Sets argument at `argIndex` and `valueIndex`.
inline void setArg(size_t argIndex, size_t valueIndex, VirtReg* vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex] = vReg;
}
//! Resets argument pack at `argIndex`.
inline void resetArg(size_t argIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex].reset();
}
//! Resets argument at `argIndex` and `valueIndex`.
inline void resetArg(size_t argIndex, size_t valueIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex] = nullptr;
}
//! Returns function attributes.
inline uint32_t attributes() const noexcept { return _frame.attributes(); }
//! Adds `attrs` to the function attributes.
inline void addAttributes(uint32_t attrs) noexcept { _frame.addAttributes(attrs); }
//! \}
};
// ============================================================================
// [asmjit::FuncRetNode]
// ============================================================================
//! Function return, used by \ref BaseCompiler.
class FuncRetNode : public InstNode {
public:
ASMJIT_NONCOPYABLE(FuncRetNode)
//! \name Construction & Destruction
//! \{
//! Creates a new `FuncRetNode` instance.
inline FuncRetNode(BaseBuilder* cb) noexcept : InstNode(cb, BaseInst::kIdAbstract, 0, 0) {
_any._nodeType = kNodeFuncRet;
}
//! \}
};
// ============================================================================
// [asmjit::InvokeNode]
// ============================================================================
//! Function invocation, used by \ref BaseCompiler.
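//!
//! A minimal invocation sketch (assuming the X86 backend, where
//! `x86::Compiler::invoke()` wraps \ref BaseCompiler::_addInvokeNode();
//! `calledFn` is a hypothetical `int (*)(int)` target and `argReg`/`retReg`
//! are virtual registers):
//!
//! \code
//! InvokeNode* invokeNode;
//! cc.invoke(&invokeNode, imm((void*)calledFn), FuncSignatureT<int, int>());
//! invokeNode->setArg(0, argReg);           // Pass argument 0.
//! invokeNode->setRet(0, retReg);           // Receive the return value.
//! \endcode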
class InvokeNode : public InstNode {
public:
ASMJIT_NONCOPYABLE(InvokeNode)
//! Operand pack provides multiple operands that can be associated with a
//! single return value or function argument. Sometimes this is necessary to
//! express an argument or return value that requires multiple registers, for
//! example a 64-bit value in 32-bit mode or passing / returning homogeneous
//! data structures.
struct OperandPack {
//! Operands.
Operand_ _data[Globals::kMaxValuePack];
//! Reset the pack by resetting all operands in the pack.
inline void reset() noexcept {
for (size_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++)
_data[valueIndex].reset();
}
//! Returns an operand at the given `valueIndex`.
inline Operand& operator[](size_t valueIndex) noexcept {
ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
return _data[valueIndex].as<Operand>();
}
//! Returns an operand at the given `valueIndex` (const).
const inline Operand& operator[](size_t valueIndex) const noexcept {
ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
return _data[valueIndex].as<Operand>();
}
};
//! Function detail.
FuncDetail _funcDetail;
//! Function return value(s).
OperandPack _rets;
//! Function arguments.
OperandPack* _args;
//! \name Construction & Destruction
//! \{
//! Creates a new `InvokeNode` instance.
inline InvokeNode(BaseBuilder* cb, uint32_t instId, uint32_t options) noexcept
: InstNode(cb, instId, options, kBaseOpCapacity),
_funcDetail(),
_args(nullptr) {
setType(kNodeInvoke);
_resetOps();
_rets.reset();
addFlags(kFlagIsRemovable);
}
//! \}
//! \name Accessors
//! \{
//! Sets the function signature.
inline Error init(const FuncSignature& signature, const Environment& environment) noexcept {
return _funcDetail.init(signature, environment);
}
//! Returns the function detail.
inline FuncDetail& detail() noexcept { return _funcDetail; }
//! Returns the function detail.
inline const FuncDetail& detail() const noexcept { return _funcDetail; }
//! Returns the target operand.
inline Operand& target() noexcept { return _opArray[0].as<Operand>(); }
//! \overload
inline const Operand& target() const noexcept { return _opArray[0].as<Operand>(); }
//! Tests whether the function has a return value.
inline bool hasRet() const noexcept { return _funcDetail.hasRet(); }
//! Returns the number of function arguments.
inline uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
//! Returns operand pack representing function return value(s).
inline OperandPack& retPack() noexcept { return _rets; }
//! Returns operand pack representing function return value(s).
inline const OperandPack& retPack() const noexcept { return _rets; }
//! Returns the return value at the given `valueIndex`.
inline Operand& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; }
//! \overload
inline const Operand& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; }
//! Returns an operand pack representing function argument(s) at `argIndex`.
inline OperandPack& argPack(size_t argIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
}
//! \overload
inline const OperandPack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
}
//! Returns a function argument at the given `argIndex`.
inline Operand& arg(size_t argIndex, size_t valueIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex][valueIndex];
}
//! \overload
inline const Operand& arg(size_t argIndex, size_t valueIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex][valueIndex];
}
//! Sets the function return value at `valueIndex` to `op`.
inline void _setRet(size_t valueIndex, const Operand_& op) noexcept { _rets[valueIndex] = op; }
//! Sets the function argument at `argIndex` and `valueIndex` to `op`.
inline void _setArg(size_t argIndex, size_t valueIndex, const Operand_& op) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex] = op;
}
//! Sets the function return value at `valueIndex` to `reg`.
inline void setRet(size_t valueIndex, const BaseReg& reg) noexcept { _setRet(valueIndex, reg); }
//! Sets the first function argument in a value-pack at `argIndex` to `reg`.
inline void setArg(size_t argIndex, const BaseReg& reg) noexcept { _setArg(argIndex, 0, reg); }
//! Sets the first function argument in a value-pack at `argIndex` to `imm`.
inline void setArg(size_t argIndex, const Imm& imm) noexcept { _setArg(argIndex, 0, imm); }
//! Sets the function argument at `argIndex` and `valueIndex` to `reg`.
inline void setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg) noexcept { _setArg(argIndex, valueIndex, reg); }
//! Sets the function argument at `argIndex` and `valueIndex` to `imm`.
inline void setArg(size_t argIndex, size_t valueIndex, const Imm& imm) noexcept { _setArg(argIndex, valueIndex, imm); }
//! \}
};
// ============================================================================
// [asmjit::FuncPass]
// ============================================================================
//! Function pass extends \ref Pass with \ref FuncPass::runOnFunction().
class ASMJIT_VIRTAPI FuncPass : public Pass {
public:
ASMJIT_NONCOPYABLE(FuncPass)
typedef Pass Base;
//! \name Construction & Destruction
//! \{
ASMJIT_API FuncPass(const char* name) noexcept;
//! \}
//! \name Accessors
//! \{
//! Returns the associated `BaseCompiler`.
inline BaseCompiler* cc() const noexcept { return static_cast<BaseCompiler*>(_cb); }
//! \}
//! \name Run
//! \{
//! Calls `runOnFunction()` on each `FuncNode` node found.
ASMJIT_API Error run(Zone* zone, Logger* logger) override;
//! Called once per `FuncNode`.
virtual Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) = 0;
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_CORE_COMPILER_H_INCLUDED

170
deps/asmjit/src/asmjit/core/compilerdefs.h vendored Normal file
View File

@ -0,0 +1,170 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_COMPILERDEFS_H_INCLUDED
#define ASMJIT_CORE_COMPILERDEFS_H_INCLUDED
#include "../core/api-config.h"
#include "../core/operand.h"
#include "../core/zonestring.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [Forward Declarations]
// ============================================================================
class RAWorkReg;
//! \addtogroup asmjit_compiler
//! \{
// ============================================================================
// [asmjit::VirtReg]
// ============================================================================
//! Virtual register data, managed by \ref BaseCompiler.
class VirtReg {
public:
ASMJIT_NONCOPYABLE(VirtReg)
//! Virtual register id.
uint32_t _id = 0;
//! Virtual register info (signature).
RegInfo _info = {};
//! Virtual register size (can be smaller than `regInfo._size`).
uint32_t _virtSize = 0;
//! Virtual register alignment (for spilling).
uint8_t _alignment = 0;
//! Type-id.
uint8_t _typeId = 0;
//! Virtual register weight for alloc/spill decisions.
uint8_t _weight = 1;
//! True if this is a fixed register, never reallocated.
uint8_t _isFixed : 1;
//! True if the virtual register is only used as a stack (never accessed as a register).
uint8_t _isStack : 1;
uint8_t _reserved : 6;
//! Virtual register name (user provided or automatically generated).
ZoneString<16> _name {};
// -------------------------------------------------------------------------
// The following members are used exclusively by RAPass. They are initialized
// when the VirtReg is created to NULL pointers and then changed during RAPass
// execution. RAPass sets them back to NULL before it returns.
// -------------------------------------------------------------------------
//! Reference to `RAWorkReg`, used during register allocation.
RAWorkReg* _workReg = nullptr;
//! \name Construction & Destruction
//! \{
inline VirtReg(uint32_t id, uint32_t signature, uint32_t virtSize, uint32_t alignment, uint32_t typeId) noexcept
: _id(id),
_info { signature },
_virtSize(virtSize),
_alignment(uint8_t(alignment)),
_typeId(uint8_t(typeId)),
_isFixed(false),
_isStack(false),
_reserved(0) {}
//! \}
//! \name Accessors
//! \{
//! Returns the virtual register id.
inline uint32_t id() const noexcept { return _id; }
//! Returns the virtual register name.
inline const char* name() const noexcept { return _name.data(); }
//! Returns the size of the virtual register name.
inline uint32_t nameSize() const noexcept { return _name.size(); }
//! Returns a register information that wraps the register signature.
inline const RegInfo& info() const noexcept { return _info; }
//! Returns a virtual register type (maps to the physical register type as well).
inline uint32_t type() const noexcept { return _info.type(); }
//! Returns a virtual register group (maps to the physical register group as well).
inline uint32_t group() const noexcept { return _info.group(); }
//! Returns a real size of the register this virtual register maps to.
//!
//! For example if this is a 128-bit SIMD register used for a scalar single
//! precision floating point value then its virtSize would be 4, however, the
//! `regSize` would still say 16 (128-bits), because it's the smallest size
//! of that register type.
inline uint32_t regSize() const noexcept { return _info.size(); }
//! Returns a register signature of this virtual register.
inline uint32_t signature() const noexcept { return _info.signature(); }
//! Returns the virtual register size.
//!
//! The virtual register size describes how many bytes the virtual register
//! needs to store its content. It can be smaller than the physical register
//! size, see `regSize()`.
inline uint32_t virtSize() const noexcept { return _virtSize; }
//! Returns the virtual register alignment.
inline uint32_t alignment() const noexcept { return _alignment; }
//! Returns the virtual register type id, see `Type::Id`.
inline uint32_t typeId() const noexcept { return _typeId; }
//! Returns the virtual register weight - the register allocator can use it
//! as explicit hint for alloc/spill decisions.
inline uint32_t weight() const noexcept { return _weight; }
//! Sets the virtual register weight (0 to 255) - the register allocator can
//! use it as explicit hint for alloc/spill decisions and initial bin-packing.
inline void setWeight(uint32_t weight) noexcept { _weight = uint8_t(weight); }
//! Returns whether the virtual register is always allocated to a fixed
//! physical register (and never reallocated).
//!
//! \note This is only used for special purposes and it's mostly internal.
inline bool isFixed() const noexcept { return bool(_isFixed); }
//! Returns whether the virtual register is in fact a stack area that only uses
//! the virtual register id to make it addressable.
//!
//! \note It's an error if a stack is accessed as a register.
inline bool isStack() const noexcept { return bool(_isStack); }
inline bool hasWorkReg() const noexcept { return _workReg != nullptr; }
inline RAWorkReg* workReg() const noexcept { return _workReg; }
inline void setWorkReg(RAWorkReg* workReg) noexcept { _workReg = workReg; }
inline void resetWorkReg() noexcept { _workReg = nullptr; }
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_COMPILERDEFS_H_INCLUDED

375
deps/asmjit/src/asmjit/core/constpool.cpp vendored Normal file
View File

@ -0,0 +1,375 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/constpool.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ConstPool - Construction / Destruction]
// ============================================================================
ConstPool::ConstPool(Zone* zone) noexcept { reset(zone); }
ConstPool::~ConstPool() noexcept {}
// ============================================================================
// [asmjit::ConstPool - Reset]
// ============================================================================
void ConstPool::reset(Zone* zone) noexcept {
_zone = zone;
size_t dataSize = 1;
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].reset();
_tree[i].setDataSize(dataSize);
_gaps[i] = nullptr;
dataSize <<= 1;
}
_gapPool = nullptr;
_size = 0;
_alignment = 0;
}
// ============================================================================
// [asmjit::ConstPool - Ops]
// ============================================================================
static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
ConstPool::Gap* gap = self->_gapPool;
if (!gap)
return self->_zone->allocT<ConstPool::Gap>();
self->_gapPool = gap->_next;
return gap;
}
static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept {
gap->_next = self->_gapPool;
self->_gapPool = gap;
}
static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexcept {
ASMJIT_ASSERT(size > 0);
while (size > 0) {
size_t gapIndex;
size_t gapSize;
if (size >= 16 && Support::isAligned<size_t>(offset, 16)) {
gapIndex = ConstPool::kIndex16;
gapSize = 16;
}
else if (size >= 8 && Support::isAligned<size_t>(offset, 8)) {
gapIndex = ConstPool::kIndex8;
gapSize = 8;
}
else if (size >= 4 && Support::isAligned<size_t>(offset, 4)) {
gapIndex = ConstPool::kIndex4;
gapSize = 4;
}
else if (size >= 2 && Support::isAligned<size_t>(offset, 2)) {
gapIndex = ConstPool::kIndex2;
gapSize = 2;
}
else {
gapIndex = ConstPool::kIndex1;
gapSize = 1;
}
// We don't have to check for errors here; if this failed nothing really
// happened (the gap just won't be visible) and the allocation will fail again
// at a place where the same check would generate a `kErrorOutOfMemory` error.
ConstPool::Gap* gap = ConstPool_allocGap(self);
if (!gap)
return;
gap->_next = self->_gaps[gapIndex];
self->_gaps[gapIndex] = gap;
gap->_offset = offset;
gap->_size = gapSize;
offset += gapSize;
size -= gapSize;
}
}
Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
size_t treeIndex;
if (size == 32)
treeIndex = kIndex32;
else if (size == 16)
treeIndex = kIndex16;
else if (size == 8)
treeIndex = kIndex8;
else if (size == 4)
treeIndex = kIndex4;
else if (size == 2)
treeIndex = kIndex2;
else if (size == 1)
treeIndex = kIndex1;
else
return DebugUtils::errored(kErrorInvalidArgument);
ConstPool::Node* node = _tree[treeIndex].get(data);
if (node) {
dstOffset = node->_offset;
return kErrorOk;
}
// Before incrementing the current offset, check whether there is a gap that
// can be reused for the requested data.
size_t offset = ~size_t(0);
size_t gapIndex = treeIndex;
while (gapIndex != kIndexCount - 1) {
ConstPool::Gap* gap = _gaps[treeIndex];
// Check if there is a gap.
if (gap) {
size_t gapOffset = gap->_offset;
size_t gapSize = gap->_size;
// Destroy the gap for now.
_gaps[treeIndex] = gap->_next;
ConstPool_freeGap(this, gap);
offset = gapOffset;
ASMJIT_ASSERT(Support::isAligned<size_t>(offset, size));
gapSize -= size;
if (gapSize > 0)
ConstPool_addGap(this, gapOffset, gapSize);
}
gapIndex++;
}
if (offset == ~size_t(0)) {
// Get how many bytes have to be skipped so the address is aligned according
// to `size`.
size_t diff = Support::alignUpDiff<size_t>(_size, size);
if (diff != 0) {
ConstPool_addGap(this, _size, diff);
_size += diff;
}
offset = _size;
_size += size;
}
// Add the initial node to the right index.
node = ConstPool::Tree::_newNode(_zone, data, size, offset, false);
if (!node) return DebugUtils::errored(kErrorOutOfMemory);
_tree[treeIndex].insert(node);
_alignment = Support::max<size_t>(_alignment, size);
dstOffset = offset;
// Now create a bunch of shared constants that are based on the data pattern.
// We stop at size 4, it probably doesn't make sense to split constants down
// to 1 byte.
size_t pCount = 1;
while (size > 4) {
size >>= 1;
pCount <<= 1;
ASMJIT_ASSERT(treeIndex != 0);
treeIndex--;
const uint8_t* pData = static_cast<const uint8_t*>(data);
for (size_t i = 0; i < pCount; i++, pData += size) {
node = _tree[treeIndex].get(pData);
if (node) continue;
node = ConstPool::Tree::_newNode(_zone, pData, size, offset + (i * size), true);
_tree[treeIndex].insert(node);
}
}
return kErrorOk;
}
// ============================================================================
// [asmjit::ConstPool - Reset]
// ============================================================================
struct ConstPoolFill {
inline ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept :
_dst(dst),
_dataSize(dataSize) {}
inline void operator()(const ConstPool::Node* node) noexcept {
if (!node->_shared)
memcpy(_dst + node->_offset, node->data(), _dataSize);
}
uint8_t* _dst;
size_t _dataSize;
};
void ConstPool::fill(void* dst) const noexcept {
// Clears possible gaps, asmjit should never emit garbage to the output.
memset(dst, 0, _size);
ConstPoolFill filler(static_cast<uint8_t*>(dst), 1);
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].forEach(filler);
filler._dataSize <<= 1;
}
}
// ============================================================================
// [asmjit::ConstPool - Unit]
// ============================================================================
#if defined(ASMJIT_TEST)
UNIT(const_pool) {
Zone zone(32384 - Zone::kBlockOverhead);
ConstPool pool(&zone);
uint32_t i;
uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 1000000;
INFO("Adding %u constants to the pool", kCount);
{
size_t prevOffset;
size_t curOffset;
uint64_t c = 0x0101010101010101u;
EXPECT(pool.add(&c, 8, prevOffset) == kErrorOk);
EXPECT(prevOffset == 0);
for (i = 1; i < kCount; i++) {
c++;
EXPECT(pool.add(&c, 8, curOffset) == kErrorOk);
EXPECT(prevOffset + 8 == curOffset);
EXPECT(pool.size() == (i + 1) * 8);
prevOffset = curOffset;
}
EXPECT(pool.alignment() == 8);
}
INFO("Retrieving %u constants from the pool", kCount);
{
uint64_t c = 0x0101010101010101u;
for (i = 0; i < kCount; i++) {
size_t offset;
EXPECT(pool.add(&c, 8, offset) == kErrorOk);
EXPECT(offset == i * 8);
c++;
}
}
INFO("Checking if the constants were split into 4-byte patterns");
{
uint32_t c = 0x01010101;
for (i = 0; i < kCount; i++) {
size_t offset;
EXPECT(pool.add(&c, 4, offset) == kErrorOk);
EXPECT(offset == i * 8);
c++;
}
}
INFO("Adding 2 byte constant to misalign the current offset");
{
uint16_t c = 0xFFFF;
size_t offset;
EXPECT(pool.add(&c, 2, offset) == kErrorOk);
EXPECT(offset == kCount * 8);
EXPECT(pool.alignment() == 8);
}
INFO("Adding 8 byte constant to check if pool gets aligned again");
{
uint64_t c = 0xFFFFFFFFFFFFFFFFu;
size_t offset;
EXPECT(pool.add(&c, 8, offset) == kErrorOk);
EXPECT(offset == kCount * 8 + 8);
}
INFO("Adding 2 byte constant to verify the gap is filled");
{
uint16_t c = 0xFFFE;
size_t offset;
EXPECT(pool.add(&c, 2, offset) == kErrorOk);
EXPECT(offset == kCount * 8 + 2);
EXPECT(pool.alignment() == 8);
}
INFO("Checking reset functionality");
{
pool.reset(&zone);
zone.reset();
EXPECT(pool.size() == 0);
EXPECT(pool.alignment() == 0);
}
INFO("Checking pool alignment when combined constants are added");
{
uint8_t bytes[32] = { 0 };
size_t offset;
pool.add(bytes, 1, offset);
EXPECT(pool.size() == 1);
EXPECT(pool.alignment() == 1);
EXPECT(offset == 0);
pool.add(bytes, 2, offset);
EXPECT(pool.size() == 4);
EXPECT(pool.alignment() == 2);
EXPECT(offset == 2);
pool.add(bytes, 4, offset);
EXPECT(pool.size() == 8);
EXPECT(pool.alignment() == 4);
EXPECT(offset == 4);
pool.add(bytes, 4, offset);
EXPECT(pool.size() == 8);
EXPECT(pool.alignment() == 4);
EXPECT(offset == 4);
pool.add(bytes, 32, offset);
EXPECT(pool.size() == 64);
EXPECT(pool.alignment() == 32);
EXPECT(offset == 32);
}
}
#endif
ASMJIT_END_NAMESPACE

262
deps/asmjit/src/asmjit/core/constpool.h vendored Normal file
View File

@ -0,0 +1,262 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_CONSTPOOL_H_INCLUDED
#define ASMJIT_CORE_CONSTPOOL_H_INCLUDED
#include "../core/support.h"
#include "../core/zone.h"
#include "../core/zonetree.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_utilities
//! \{
// ============================================================================
// [asmjit::ConstPool]
// ============================================================================
//! Constant pool.
class ConstPool {
public:
ASMJIT_NONCOPYABLE(ConstPool)
//! Constant pool scope.
enum Scope : uint32_t {
//! Local constant, always embedded right after the current function.
kScopeLocal = 0,
//! Global constant, embedded at the end of the currently compiled code.
kScopeGlobal = 1
};
//! \cond INTERNAL
//! Index of a given size in const-pool table.
enum Index : uint32_t {
kIndex1 = 0,
kIndex2 = 1,
kIndex4 = 2,
kIndex8 = 3,
kIndex16 = 4,
kIndex32 = 5,
kIndexCount = 6
};
//! Zone-allocated const-pool gap created by two differently aligned constants.
struct Gap {
//! Pointer to the next gap
Gap* _next;
//! Offset of the gap.
size_t _offset;
//! Remaining bytes of the gap (basically a gap size).
size_t _size;
};
//! Zone-allocated const-pool node.
class Node : public ZoneTreeNodeT<Node> {
public:
ASMJIT_NONCOPYABLE(Node)
//! True if this constant is shared with another.
uint32_t _shared : 1;
//! Data offset from the beginning of the pool.
uint32_t _offset;
inline Node(size_t offset, bool shared) noexcept
: ZoneTreeNodeT<Node>(),
_shared(shared),
_offset(uint32_t(offset)) {}
inline void* data() const noexcept {
return static_cast<void*>(const_cast<ConstPool::Node*>(this) + 1);
}
};
//! Data comparer used internally.
class Compare {
public:
size_t _dataSize;
inline Compare(size_t dataSize) noexcept
: _dataSize(dataSize) {}
inline int operator()(const Node& a, const Node& b) const noexcept {
return ::memcmp(a.data(), b.data(), _dataSize);
}
inline int operator()(const Node& a, const void* data) const noexcept {
return ::memcmp(a.data(), data, _dataSize);
}
};
//! Zone-allocated const-pool tree.
struct Tree {
//! RB tree.
ZoneTree<Node> _tree;
//! Size of the tree (number of nodes).
size_t _size;
//! Size of the data.
size_t _dataSize;
inline explicit Tree(size_t dataSize = 0) noexcept
: _tree(),
_size(0),
_dataSize(dataSize) {}
inline void reset() noexcept {
_tree.reset();
_size = 0;
}
inline bool empty() const noexcept { return _size == 0; }
inline size_t size() const noexcept { return _size; }
inline void setDataSize(size_t dataSize) noexcept {
ASMJIT_ASSERT(empty());
_dataSize = dataSize;
}
inline Node* get(const void* data) noexcept {
Compare cmp(_dataSize);
return _tree.get(data, cmp);
}
inline void insert(Node* node) noexcept {
Compare cmp(_dataSize);
_tree.insert(node, cmp);
_size++;
}
template<typename Visitor>
inline void forEach(Visitor& visitor) const noexcept {
Node* node = _tree.root();
if (!node) return;
Node* stack[Globals::kMaxTreeHeight];
size_t top = 0;
for (;;) {
Node* left = node->left();
if (left != nullptr) {
ASMJIT_ASSERT(top != Globals::kMaxTreeHeight);
stack[top++] = node;
node = left;
continue;
}
for (;;) {
visitor(node);
node = node->right();
if (node != nullptr)
break;
if (top == 0)
return;
node = stack[--top];
}
}
}
static inline Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
Node* node = zone->allocT<Node>(sizeof(Node) + size);
if (ASMJIT_UNLIKELY(!node)) return nullptr;
node = new(node) Node(offset, shared);
memcpy(node->data(), data, size);
return node;
}
};
//! \endcond
//! Zone allocator.
Zone* _zone;
//! Tree per size.
Tree _tree[kIndexCount];
//! Gaps per size.
Gap* _gaps[kIndexCount];
//! Pool of reusable gap objects.
Gap* _gapPool;
//! Size of the pool (in bytes).
size_t _size;
//! Required pool alignment.
size_t _alignment;
//! \name Construction & Destruction
//! \{
ASMJIT_API ConstPool(Zone* zone) noexcept;
ASMJIT_API ~ConstPool() noexcept;
ASMJIT_API void reset(Zone* zone) noexcept;
//! \}
//! \name Accessors
//! \{
//! Tests whether the constant-pool is empty.
inline bool empty() const noexcept { return _size == 0; }
//! Returns the size of the constant-pool in bytes.
inline size_t size() const noexcept { return _size; }
//! Returns minimum alignment.
inline size_t alignment() const noexcept { return _alignment; }
//! \}
//! \name Utilities
//! \{
//! Adds a constant to the constant pool.
//!
//! The constant must have a known size, which is 1, 2, 4, 8, 16, or 32 bytes.
//! The constant is added to the pool only if it doesn't exist yet, otherwise
//! the offset of the cached value is returned.
//!
//! AsmJit is able to subdivide added constants, so for example if you add
//! 8-byte constant 0x1122334455667788 it will create the following slots:
//!
//! 8-byte: 0x1122334455667788
//! 4-byte: 0x11223344, 0x55667788
//!
//! The reason is that when combining MMX/SSE/AVX code some patterns are used
//! frequently. However, AsmJit is not able to reallocate a constant that has
//! already been added. For example if you try to add a 4-byte constant and then
//! an 8-byte constant having the same 4-byte pattern as the previous one, two
//! independent slots will be generated by the pool.
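//!
//! A sketch of standalone usage (the pool is normally managed by
//! \ref BaseCompiler, so this is illustrative only; offsets assume a
//! little-endian target and error checking is omitted):
//!
//! \code
//! Zone zone(1024);
//! ConstPool pool(&zone);
//!
//! uint64_t c = 0x1122334455667788u;
//! size_t offset;
//! pool.add(&c, 8, offset);                 // First 8-byte constant -> offset 0.
//!
//! uint32_t lo = 0x55667788u;
//! pool.add(&lo, 4, offset);                // Matches the shared 4-byte slot at offset 0.
//! \endcode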
ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept;
//! Fills the destination with the content of this constant pool.
ASMJIT_API void fill(void* dst) const noexcept;
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_CONSTPOOL_H_INCLUDED

97
deps/asmjit/src/asmjit/core/cpuinfo.cpp vendored Normal file
View File

@ -0,0 +1,97 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/cpuinfo.h"
#if !defined(_WIN32)
#include <errno.h>
#include <sys/utsname.h>
#include <unistd.h>
#endif
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::CpuInfo - Detect - CPU NumThreads]
// ============================================================================
#if defined(_WIN32)
static inline uint32_t detectHWThreadCount() noexcept {
SYSTEM_INFO info;
::GetSystemInfo(&info);
return info.dwNumberOfProcessors;
}
#elif defined(_SC_NPROCESSORS_ONLN)
static inline uint32_t detectHWThreadCount() noexcept {
long res = ::sysconf(_SC_NPROCESSORS_ONLN);
return res <= 0 ? uint32_t(1) : uint32_t(res);
}
#else
static inline uint32_t detectHWThreadCount() noexcept {
return 1;
}
#endif
// ============================================================================
// [asmjit::CpuInfo - Detect - CPU Features]
// ============================================================================
#if defined(ASMJIT_BUILD_X86) && ASMJIT_ARCH_X86
namespace x86 { void detectCpu(CpuInfo& cpu) noexcept; }
#endif
#if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM
namespace arm { void detectCpu(CpuInfo& cpu) noexcept; }
#endif
// ============================================================================
// [asmjit::CpuInfo - Detect - Static Initializer]
// ============================================================================
static uint32_t cpuInfoInitialized;
static CpuInfo cpuInfoGlobal(Globals::NoInit);
const CpuInfo& CpuInfo::host() noexcept {
// This should never cause a problem as the resulting information should
// always be the same.
if (!cpuInfoInitialized) {
CpuInfo cpuInfoLocal;
#if defined(ASMJIT_BUILD_X86) && ASMJIT_ARCH_X86
x86::detectCpu(cpuInfoLocal);
#endif
#if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM
arm::detectCpu(cpuInfoLocal);
#endif
cpuInfoLocal._hwThreadCount = detectHWThreadCount();
cpuInfoGlobal = cpuInfoLocal;
cpuInfoInitialized = 1;
}
return cpuInfoGlobal;
}
ASMJIT_END_NAMESPACE
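A short usage sketch of the API implemented above; the helper function is illustrative only. CpuInfo::host() runs detection once and caches the result, so repeated calls are cheap.

#include <asmjit/asmjit.h>
#include <cstdio>

static void printHostCpu() {
  const asmjit::CpuInfo& cpu = asmjit::CpuInfo::host();  // detected once, then cached
  std::printf("vendor=%s brand=%s hwThreads=%u\n",
              cpu.vendor(), cpu.brand(), cpu.hwThreadCount());
}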

154
deps/asmjit/src/asmjit/core/cpuinfo.h vendored Normal file
View File

@ -0,0 +1,154 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_CPUINFO_H_INCLUDED
#define ASMJIT_CORE_CPUINFO_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/features.h"
#include "../core/globals.h"
#include "../core/string.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::CpuInfo]
// ============================================================================
//! CPU information.
class CpuInfo {
public:
//! Architecture.
uint8_t _arch;
//! Sub-architecture.
uint8_t _subArch;
//! Reserved for future use.
uint16_t _reserved;
//! CPU family ID.
uint32_t _familyId;
//! CPU model ID.
uint32_t _modelId;
//! CPU brand ID.
uint32_t _brandId;
//! CPU stepping.
uint32_t _stepping;
//! Processor type.
uint32_t _processorType;
//! Maximum number of addressable IDs for logical processors.
uint32_t _maxLogicalProcessors;
//! Cache line size (in bytes).
uint32_t _cacheLineSize;
//! Number of hardware threads.
uint32_t _hwThreadCount;
//! CPU vendor string.
FixedString<16> _vendor;
//! CPU brand string.
FixedString<64> _brand;
//! CPU features.
BaseFeatures _features;
//! \name Construction & Destruction
//! \{
inline CpuInfo() noexcept { reset(); }
inline CpuInfo(const CpuInfo& other) noexcept = default;
inline explicit CpuInfo(Globals::NoInit_) noexcept
: _features(Globals::NoInit) {};
//! Returns the host CPU information.
ASMJIT_API static const CpuInfo& host() noexcept;
//! Initializes CpuInfo to the given architecture, see \ref Environment.
inline void initArch(uint32_t arch, uint32_t subArch = 0u) noexcept {
_arch = uint8_t(arch);
_subArch = uint8_t(subArch);
}
inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
//! \}
//! \name Overloaded Operators
//! \{
inline CpuInfo& operator=(const CpuInfo& other) noexcept = default;
//! \}
//! \name Accessors
//! \{
//! Returns the CPU architecture id, see \ref Environment::Arch.
inline uint32_t arch() const noexcept { return _arch; }
//! Returns the CPU architecture sub-id, see \ref Environment::SubArch.
inline uint32_t subArch() const noexcept { return _subArch; }
//! Returns the CPU family ID.
inline uint32_t familyId() const noexcept { return _familyId; }
//! Returns the CPU model ID.
inline uint32_t modelId() const noexcept { return _modelId; }
//! Returns the CPU brand id.
inline uint32_t brandId() const noexcept { return _brandId; }
//! Returns the CPU stepping.
inline uint32_t stepping() const noexcept { return _stepping; }
//! Returns the processor type.
inline uint32_t processorType() const noexcept { return _processorType; }
//! Returns the number of maximum logical processors.
inline uint32_t maxLogicalProcessors() const noexcept { return _maxLogicalProcessors; }
//! Returns the size of a cache line flush.
inline uint32_t cacheLineSize() const noexcept { return _cacheLineSize; }
//! Returns number of hardware threads available.
inline uint32_t hwThreadCount() const noexcept { return _hwThreadCount; }
//! Returns the CPU vendor.
inline const char* vendor() const noexcept { return _vendor.str; }
//! Tests whether the CPU vendor is equal to `s`.
inline bool isVendor(const char* s) const noexcept { return _vendor.eq(s); }
//! Returns the CPU brand string.
inline const char* brand() const noexcept { return _brand.str; }
//! Returns all CPU features as `BaseFeatures`, cast to your arch-specific class
//! if needed.
template<typename T = BaseFeatures>
inline const T& features() const noexcept { return _features.as<T>(); }
//! Tests whether the CPU has the given `feature`.
inline bool hasFeature(uint32_t featureId) const noexcept { return _features.has(featureId); }
//! Adds the given CPU `feature` to the list of this CpuInfo features.
inline CpuInfo& addFeature(uint32_t featureId) noexcept { _features.add(featureId); return *this; }
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_CPUINFO_H_INCLUDED
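A hedged example of the feature accessors declared above; the x86-specific names (x86::Features, hasAVX2()) are assumptions based on asmjit's x86 backend and only apply when that backend is compiled in.

#include <asmjit/asmjit.h>

static bool hostHasAvx2() {
#if ASMJIT_ARCH_X86
  const asmjit::CpuInfo& cpu = asmjit::CpuInfo::host();
  // features<T>() casts the stored BaseFeatures to an architecture-specific set.
  const asmjit::x86::Features& f = cpu.features<asmjit::x86::Features>();
  return f.hasAVX2();
#else
  return false;
#endif
}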

1071
deps/asmjit/src/asmjit/core/datatypes.h vendored Normal file

File diff suppressed because it is too large

351
deps/asmjit/src/asmjit/core/emithelper.cpp vendored Normal file
View File

@ -0,0 +1,351 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
#include "../core/emithelper_p.h"
#include "../core/formatter.h"
#include "../core/funcargscontext_p.h"
#include "../core/radefs_p.h"
// Can be used for debugging...
// #define ASMJIT_DUMP_ARGS_ASSIGNMENT
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::BaseEmitHelper - Formatting]
// ============================================================================
#ifdef ASMJIT_DUMP_ARGS_ASSIGNMENT
static void dumpFuncValue(String& sb, uint32_t arch, const FuncValue& value) noexcept {
Formatter::formatTypeId(sb, value.typeId());
sb.append('@');
if (value.isIndirect())
sb.append('[');
if (value.isReg())
Formatter::formatRegister(sb, 0, nullptr, arch, value.regType(), value.regId());
else if (value.isStack())
sb.appendFormat("[%d]", value.stackOffset());
else
sb.append("<none>");
if (value.isIndirect())
sb.append(']');
}
static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept {
typedef FuncArgsContext::Var Var;
uint32_t arch = ctx.arch();
uint32_t varCount = ctx.varCount();
for (uint32_t i = 0; i < varCount; i++) {
const Var& var = ctx.var(i);
const FuncValue& dst = var.out;
const FuncValue& cur = var.cur;
sb.appendFormat("Var%u: ", i);
dumpFuncValue(sb, arch, dst);
sb.append(" <- ");
dumpFuncValue(sb, arch, cur);
if (var.isDone())
sb.append(" {Done}");
sb.append('\n');
}
}
#endif
// ============================================================================
// [asmjit::BaseEmitHelper - EmitArgsAssignment]
// ============================================================================
ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
typedef FuncArgsContext::Var Var;
typedef FuncArgsContext::WorkData WorkData;
enum WorkFlags : uint32_t {
kWorkNone = 0x00,
kWorkDidSome = 0x01,
kWorkPending = 0x02,
kWorkPostponed = 0x04
};
uint32_t arch = frame.arch();
const ArchTraits& archTraits = ArchTraits::byArch(arch);
RAConstraints constraints;
FuncArgsContext ctx;
ASMJIT_PROPAGATE(constraints.init(arch));
ASMJIT_PROPAGATE(ctx.initWorkData(frame, args, &constraints));
#ifdef ASMJIT_DUMP_ARGS_ASSIGNMENT
{
String sb;
dumpAssignment(sb, ctx);
printf("%s\n", sb.data());
}
#endif
uint32_t varCount = ctx._varCount;
WorkData* workData = ctx._workData;
uint32_t saVarId = ctx._saVarId;
BaseReg sp = BaseReg::fromSignatureAndId(_emitter->_gpRegInfo.signature(), archTraits.spRegId());
BaseReg sa = sp;
if (frame.hasDynamicAlignment()) {
if (frame.hasPreservedFP())
sa.setId(archTraits.fpRegId());
else
sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
}
// --------------------------------------------------------------------------
// Register to stack and stack to stack moves must be first as now we have
// the biggest chance of having as many as possible unassigned registers.
// --------------------------------------------------------------------------
if (ctx._stackDstMask) {
// Base address of all arguments passed by stack.
BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id())));
BaseMem baseStackPtr(sp, 0);
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (!var.out.isStack())
continue;
FuncValue& cur = var.cur;
FuncValue& out = var.out;
ASMJIT_ASSERT(cur.isReg() || cur.isStack());
BaseReg reg;
BaseMem dstStackPtr = baseStackPtr.cloneAdjusted(out.stackOffset());
BaseMem srcStackPtr = baseArgPtr.cloneAdjusted(cur.stackOffset());
if (cur.isIndirect()) {
if (cur.isStack()) {
// TODO: Indirect stack.
return DebugUtils::errored(kErrorInvalidAssignment);
}
else {
srcStackPtr.setBaseId(cur.regId());
}
}
if (cur.isReg() && !cur.isIndirect()) {
WorkData& wd = workData[archTraits.regTypeToGroup(cur.regType())];
uint32_t rId = cur.regId();
reg.setSignatureAndId(archTraits.regTypeToSignature(cur.regType()), rId);
wd.unassign(varId, rId);
}
else {
// Stack to stack move (through a temporary register) - since we control the
// intermediate register we can decide which one to use. In general we follow the
// rule that IntToInt moves use GP regs (possibly with sign or zero extension),
// and all other moves use either GP or VEC regs depending on the size of the move.
RegInfo rInfo = getSuitableRegForMemToMemMove(arch, out.typeId(), cur.typeId());
if (ASMJIT_UNLIKELY(!rInfo.isValid()))
return DebugUtils::errored(kErrorInvalidState);
WorkData& wd = workData[rInfo.group()];
uint32_t availableRegs = wd.availableRegs();
if (ASMJIT_UNLIKELY(!availableRegs))
return DebugUtils::errored(kErrorInvalidState);
uint32_t rId = Support::ctz(availableRegs);
reg.setSignatureAndId(rInfo.signature(), rId);
ASMJIT_PROPAGATE(emitArgMove(reg, out.typeId(), srcStackPtr, cur.typeId()));
}
if (cur.isIndirect() && cur.isReg())
workData[BaseReg::kGroupGp].unassign(varId, cur.regId());
// Register to stack move.
ASMJIT_PROPAGATE(emitRegMove(dstStackPtr, reg, cur.typeId()));
var.markDone();
}
}
// --------------------------------------------------------------------------
// Shuffle all registers that are currently assigned accordingly to target
// assignment.
// --------------------------------------------------------------------------
uint32_t workFlags = kWorkNone;
for (;;) {
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (var.isDone() || !var.cur.isReg())
continue;
FuncValue& cur = var.cur;
FuncValue& out = var.out;
uint32_t curGroup = archTraits.regTypeToGroup(cur.regType());
uint32_t outGroup = archTraits.regTypeToGroup(out.regType());
uint32_t curId = cur.regId();
uint32_t outId = out.regId();
if (curGroup != outGroup) {
// TODO: Conversion is not supported.
return DebugUtils::errored(kErrorInvalidAssignment);
}
else {
WorkData& wd = workData[outGroup];
if (!wd.isAssigned(outId)) {
EmitMove:
ASMJIT_PROPAGATE(
emitArgMove(
BaseReg::fromSignatureAndId(archTraits.regTypeToSignature(out.regType()), outId), out.typeId(),
BaseReg::fromSignatureAndId(archTraits.regTypeToSignature(cur.regType()), curId), cur.typeId()));
wd.reassign(varId, outId, curId);
cur.initReg(out.regType(), outId, out.typeId());
if (outId == out.regId())
var.markDone();
workFlags |= kWorkDidSome | kWorkPending;
}
else {
uint32_t altId = wd._physToVarId[outId];
Var& altVar = ctx._vars[altId];
if (!altVar.out.isInitialized() || (altVar.out.isReg() && altVar.out.regId() == curId)) {
// Only few architectures provide swap operations, and only for few register groups.
if (archTraits.hasSwap(curGroup)) {
uint32_t highestType = Support::max(cur.regType(), altVar.cur.regType());
if (Support::isBetween<uint32_t>(highestType, BaseReg::kTypeGp8Lo, BaseReg::kTypeGp16))
highestType = BaseReg::kTypeGp32;
uint32_t signature = archTraits.regTypeToSignature(highestType);
ASMJIT_PROPAGATE(
emitRegSwap(BaseReg::fromSignatureAndId(signature, outId),
BaseReg::fromSignatureAndId(signature, curId)));
wd.swap(varId, curId, altId, outId);
cur.setRegId(outId);
var.markDone();
altVar.cur.setRegId(curId);
if (altVar.out.isInitialized())
altVar.markDone();
workFlags |= kWorkDidSome;
}
else {
// If there is a scratch register it can be used to perform the swap.
uint32_t availableRegs = wd.availableRegs();
if (availableRegs) {
uint32_t inOutRegs = wd.dstRegs();
if (availableRegs & ~inOutRegs)
availableRegs &= ~inOutRegs;
outId = Support::ctz(availableRegs);
goto EmitMove;
}
else {
workFlags |= kWorkPending;
}
}
}
else {
workFlags |= kWorkPending;
}
}
}
}
if (!(workFlags & kWorkPending))
break;
// If we did nothing twice it means that something is really broken.
if ((workFlags & (kWorkDidSome | kWorkPostponed)) == kWorkPostponed)
return DebugUtils::errored(kErrorInvalidState);
workFlags = (workFlags & kWorkDidSome) ? kWorkNone : kWorkPostponed;
}
// --------------------------------------------------------------------------
// Load arguments passed by stack into registers. This is pretty simple and
// it never requires multiple iterations like the previous phase.
// --------------------------------------------------------------------------
if (ctx._hasStackSrc) {
uint32_t iterCount = 1;
if (frame.hasDynamicAlignment() && !frame.hasPreservedFP())
sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
// Base address of all arguments passed by stack.
BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id())));
for (uint32_t iter = 0; iter < iterCount; iter++) {
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (var.isDone())
continue;
if (var.cur.isStack()) {
ASMJIT_ASSERT(var.out.isReg());
uint32_t outId = var.out.regId();
uint32_t outType = var.out.regType();
uint32_t group = archTraits.regTypeToGroup(outType);
WorkData& wd = ctx._workData[group];
if (outId == sa.id() && group == BaseReg::kGroupGp) {
// This register will be processed last as we still need `saRegId`.
if (iterCount == 1) {
iterCount++;
continue;
}
wd.unassign(wd._physToVarId[outId], outId);
}
BaseReg dstReg = BaseReg::fromSignatureAndId(archTraits.regTypeToSignature(outType), outId);
BaseMem srcMem = baseArgPtr.cloneAdjusted(var.cur.stackOffset());
ASMJIT_PROPAGATE(emitArgMove(
dstReg, var.out.typeId(),
srcMem, var.cur.typeId()));
wd.assign(varId, outId);
var.cur.initReg(outType, outId, var.cur.typeId(), FuncValue::kFlagIsDone);
}
}
}
}
return kErrorOk;
}
ASMJIT_END_NAMESPACE
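For context, a hedged sketch of the user-side workflow that eventually reaches BaseEmitHelper::emitArgsAssignment() above: describe the function, choose homes for its arguments, then let the emitter shuffle them into place. The helper function, the chosen registers and the exact init() signatures are assumptions and may vary between asmjit revisions.

#include <asmjit/x86.h>
using namespace asmjit;

static Error prologSketch(x86::Assembler& a, CodeHolder& code) {
  FuncDetail func;
  Error err = func.init(FuncSignatureT<void, int*, const int*>(CallConv::kIdHost),
                        code.environment());
  if (err) return err;

  FuncFrame frame;
  if ((err = frame.init(func))) return err;
  frame.addDirtyRegs(x86::rdi, x86::rsi);      // registers the body intends to clobber

  FuncArgsAssignment args(&func);
  args.assignAll(x86::rdi, x86::rsi);          // desired homes for the two arguments
  if ((err = args.updateFuncFrame(frame))) return err;
  if ((err = frame.finalize())) return err;

  if ((err = a.emitProlog(frame))) return err;
  if ((err = a.emitArgsAssignment(frame, args))) return err;  // drives the shuffling above
  // ... function body ...
  return a.emitEpilog(frame);
}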

83
deps/asmjit/src/asmjit/core/emithelper_p.h vendored Normal file
View File

@ -0,0 +1,83 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_EMITHELPER_P_H_INCLUDED
#define ASMJIT_CORE_EMITHELPER_P_H_INCLUDED
#include "../core/emitter.h"
#include "../core/operand.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::BaseEmitHelper]
// ============================================================================
//! Helper class that provides utilities for each supported architecture.
class BaseEmitHelper {
public:
BaseEmitter* _emitter;
inline explicit BaseEmitHelper(BaseEmitter* emitter = nullptr) noexcept
: _emitter(emitter) {}
inline BaseEmitter* emitter() const noexcept { return _emitter; }
inline void setEmitter(BaseEmitter* emitter) noexcept { _emitter = emitter; }
//! Emits a pure move operation between two registers of the same type or
//! between a register and its home slot. This function does not handle
//! register conversion.
virtual Error emitRegMove(
const Operand_& dst_,
const Operand_& src_, uint32_t typeId, const char* comment = nullptr) = 0;
//! Emits swap between two registers.
virtual Error emitRegSwap(
const BaseReg& a,
const BaseReg& b, const char* comment = nullptr) = 0;
//! Emits move from a function argument (either register or stack) to a register.
//!
//! This function can handle the necessary conversion from one argument to
//! another, and from one register type to another, if possible. Any
//! attempted conversion that requires a third register of a different group
//! (for example a conversion from K to MMX on X86/X64) will fail.
virtual Error emitArgMove(
const BaseReg& dst_, uint32_t dstTypeId,
const Operand_& src_, uint32_t srcTypeId, const char* comment = nullptr) = 0;
Error emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args);
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_EMITHELPER_P_H_INCLUDED

394
deps/asmjit/src/asmjit/core/emitter.cpp vendored Normal file
View File

@ -0,0 +1,394 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/emitterutils_p.h"
#include "../core/errorhandler.h"
#include "../core/logger.h"
#include "../core/support.h"
#ifdef ASMJIT_BUILD_X86
#include "../x86/x86emithelper_p.h"
#include "../x86/x86instdb_p.h"
#endif // ASMJIT_BUILD_X86
#ifdef ASMJIT_BUILD_ARM
#include "../arm/a64emithelper_p.h"
#include "../arm/a64instdb.h"
#endif // ASMJIT_BUILD_ARM
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::BaseEmitter - Construction / Destruction]
// ============================================================================
BaseEmitter::BaseEmitter(uint32_t emitterType) noexcept
: _emitterType(uint8_t(emitterType)) {}
BaseEmitter::~BaseEmitter() noexcept {
if (_code) {
_addEmitterFlags(kFlagDestroyed);
_code->detach(this);
}
}
// ============================================================================
// [asmjit::BaseEmitter - Finalize]
// ============================================================================
Error BaseEmitter::finalize() {
// Does nothing by default, overridden by `BaseBuilder` and `BaseCompiler`.
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseEmitter - Internals]
// ============================================================================
static constexpr uint32_t kEmitterPreservedFlags =
BaseEmitter::kFlagOwnLogger |
BaseEmitter::kFlagOwnErrorHandler ;
static ASMJIT_NOINLINE void BaseEmitter_updateForcedOptions(BaseEmitter* self) noexcept {
bool hasLogger = self->_logger != nullptr;
bool hasValidationOptions;
if (self->emitterType() == BaseEmitter::kTypeAssembler)
hasValidationOptions = self->hasValidationOption(BaseEmitter::kValidationOptionAssembler);
else
hasValidationOptions = self->hasValidationOption(BaseEmitter::kValidationOptionIntermediate);
self->_forcedInstOptions &= ~BaseInst::kOptionReserved;
if (hasLogger || hasValidationOptions)
self->_forcedInstOptions |= BaseInst::kOptionReserved;
}
// ============================================================================
// [asmjit::BaseEmitter - Validation Options]
// ============================================================================
void BaseEmitter::addValidationOptions(uint32_t options) noexcept {
_validationOptions = uint8_t(_validationOptions | options);
BaseEmitter_updateForcedOptions(this);
}
void BaseEmitter::clearValidationOptions(uint32_t options) noexcept {
_validationOptions = uint8_t(_validationOptions & ~options);
BaseEmitter_updateForcedOptions(this);
}
// ============================================================================
// [asmjit::BaseEmitter - Logging]
// ============================================================================
void BaseEmitter::setLogger(Logger* logger) noexcept {
#ifndef ASMJIT_NO_LOGGING
if (logger) {
_logger = logger;
_addEmitterFlags(kFlagOwnLogger);
}
else {
_logger = nullptr;
_clearEmitterFlags(kFlagOwnLogger);
if (_code)
_logger = _code->logger();
}
BaseEmitter_updateForcedOptions(this);
#else
DebugUtils::unused(logger);
#endif
}
// ============================================================================
// [asmjit::BaseEmitter - Error Handling]
// ============================================================================
void BaseEmitter::setErrorHandler(ErrorHandler* errorHandler) noexcept {
if (errorHandler) {
_errorHandler = errorHandler;
_addEmitterFlags(kFlagOwnErrorHandler);
}
else {
_errorHandler = nullptr;
_clearEmitterFlags(kFlagOwnErrorHandler);
if (_code)
_errorHandler = _code->errorHandler();
}
}
Error BaseEmitter::reportError(Error err, const char* message) {
ErrorHandler* eh = _errorHandler;
if (eh) {
if (!message)
message = DebugUtils::errorAsString(err);
eh->handleError(err, message, this);
}
return err;
}
// ============================================================================
// [asmjit::BaseEmitter - Labels]
// ============================================================================
Label BaseEmitter::labelByName(const char* name, size_t nameSize, uint32_t parentId) noexcept {
return Label(_code ? _code->labelIdByName(name, nameSize, parentId) : uint32_t(Globals::kInvalidId));
}
bool BaseEmitter::isLabelValid(uint32_t labelId) const noexcept {
return _code && labelId < _code->labelCount();
}
// ============================================================================
// [asmjit::BaseEmitter - Emit (Low-Level)]
// ============================================================================
using EmitterUtils::noExt;
Error BaseEmitter::_emitI(uint32_t instId) {
return _emit(instId, noExt[0], noExt[1], noExt[2], noExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0) {
return _emit(instId, o0, noExt[1], noExt[2], noExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1) {
return _emit(instId, o0, o1, noExt[2], noExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2) {
return _emit(instId, o0, o1, o2, noExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
Operand_ opExt[3] = { o3 };
return _emit(instId, o0, o1, o2, opExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4) {
Operand_ opExt[3] = { o3, o4 };
return _emit(instId, o0, o1, o2, opExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) {
Operand_ opExt[3] = { o3, o4, o5 };
return _emit(instId, o0, o1, o2, opExt);
}
Error BaseEmitter::_emitOpArray(uint32_t instId, const Operand_* operands, size_t opCount) {
const Operand_* op = operands;
Operand_ opExt[3];
switch (opCount) {
case 0:
return _emit(instId, noExt[0], noExt[1], noExt[2], noExt);
case 1:
return _emit(instId, op[0], noExt[1], noExt[2], noExt);
case 2:
return _emit(instId, op[0], op[1], noExt[2], noExt);
case 3:
return _emit(instId, op[0], op[1], op[2], noExt);
case 4:
opExt[0] = op[3];
opExt[1].reset();
opExt[2].reset();
return _emit(instId, op[0], op[1], op[2], opExt);
case 5:
opExt[0] = op[3];
opExt[1] = op[4];
opExt[2].reset();
return _emit(instId, op[0], op[1], op[2], opExt);
case 6:
return _emit(instId, op[0], op[1], op[2], op + 3);
default:
return DebugUtils::errored(kErrorInvalidArgument);
}
}
// ============================================================================
// [asmjit::BaseEmitter - Emit (High-Level)]
// ============================================================================
ASMJIT_FAVOR_SIZE Error BaseEmitter::emitProlog(const FuncFrame& frame) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
#ifdef ASMJIT_BUILD_X86
if (environment().isFamilyX86()) {
x86::EmitHelper emitHelper(this, frame.isAvxEnabled());
return emitHelper.emitProlog(frame);
}
#endif
#ifdef ASMJIT_BUILD_ARM
if (environment().isArchAArch64()) {
a64::EmitHelper emitHelper(this);
return emitHelper.emitProlog(frame);
}
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
ASMJIT_FAVOR_SIZE Error BaseEmitter::emitEpilog(const FuncFrame& frame) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
#ifdef ASMJIT_BUILD_X86
if (environment().isFamilyX86()) {
x86::EmitHelper emitHelper(this, frame.isAvxEnabled());
return emitHelper.emitEpilog(frame);
}
#endif
#ifdef ASMJIT_BUILD_ARM
if (environment().isArchAArch64()) {
a64::EmitHelper emitHelper(this);
return emitHelper.emitEpilog(frame);
}
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
ASMJIT_FAVOR_SIZE Error BaseEmitter::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
#ifdef ASMJIT_BUILD_X86
if (environment().isFamilyX86()) {
x86::EmitHelper emitHelper(this, frame.isAvxEnabled());
return emitHelper.emitArgsAssignment(frame, args);
}
#endif
#ifdef ASMJIT_BUILD_ARM
if (environment().isArchAArch64()) {
a64::EmitHelper emitHelper(this);
return emitHelper.emitArgsAssignment(frame, args);
}
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
// ============================================================================
// [asmjit::BaseEmitter - Comment]
// ============================================================================
Error BaseEmitter::commentf(const char* fmt, ...) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
#ifndef ASMJIT_NO_LOGGING
va_list ap;
va_start(ap, fmt);
Error err = commentv(fmt, ap);
va_end(ap);
return err;
#else
DebugUtils::unused(fmt);
return kErrorOk;
#endif
}
Error BaseEmitter::commentv(const char* fmt, va_list ap) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
#ifndef ASMJIT_NO_LOGGING
StringTmp<1024> sb;
Error err = sb.appendVFormat(fmt, ap);
if (ASMJIT_UNLIKELY(err))
return err;
return comment(sb.data(), sb.size());
#else
DebugUtils::unused(fmt, ap);
return kErrorOk;
#endif
}
// ============================================================================
// [asmjit::BaseEmitter - Events]
// ============================================================================
Error BaseEmitter::onAttach(CodeHolder* code) noexcept {
_code = code;
_environment = code->environment();
const ArchTraits& archTraits = ArchTraits::byArch(code->arch());
uint32_t nativeRegType = Environment::is32Bit(code->arch()) ? BaseReg::kTypeGp32 : BaseReg::kTypeGp64;
_gpRegInfo.setSignature(archTraits._regInfo[nativeRegType].signature());
onSettingsUpdated();
return kErrorOk;
}
Error BaseEmitter::onDetach(CodeHolder* code) noexcept {
DebugUtils::unused(code);
_clearEmitterFlags(~kEmitterPreservedFlags);
_forcedInstOptions = BaseInst::kOptionReserved;
_privateData = 0;
if (!hasOwnLogger())
_logger = nullptr;
if (!hasOwnErrorHandler())
_errorHandler = nullptr;
_environment.reset();
_gpRegInfo.reset();
_instOptions = 0;
_extraReg.reset();
_inlineComment = nullptr;
return kErrorOk;
}
void BaseEmitter::onSettingsUpdated() noexcept {
// Only called when attached to CodeHolder by CodeHolder.
ASMJIT_ASSERT(_code != nullptr);
if (!hasOwnLogger())
_logger = _code->logger();
if (!hasOwnErrorHandler())
_errorHandler = _code->errorHandler();
BaseEmitter_updateForcedOptions(this);
}
ASMJIT_END_NAMESPACE
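A small sketch of the own-logger / own-error-handler semantics implemented above, as seen from user code; the error handler class and the helper function are illustrative assumptions.

#include <asmjit/asmjit.h>
#include <cstdio>
using namespace asmjit;

class PrintingErrorHandler : public ErrorHandler {
public:
  void handleError(Error err, const char* message, BaseEmitter* origin) override {
    (void)origin;
    std::fprintf(stderr, "asmjit error %u: %s\n", err, message);
  }
};

static void attachDiagnostics(BaseEmitter& emitter, FileLogger& logger,
                              PrintingErrorHandler& eh) {
  emitter.setLogger(&logger);    // the emitter now owns its logger choice
  emitter.setErrorHandler(&eh);  // reportError() will invoke handleError()
  // Passing nullptr reverts to whatever the attached CodeHolder provides:
  //   emitter.setLogger(nullptr);
  //   emitter.setErrorHandler(nullptr);
}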

719
deps/asmjit/src/asmjit/core/emitter.h vendored Normal file
View File

@ -0,0 +1,719 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_EMITTER_H_INCLUDED
#define ASMJIT_CORE_EMITTER_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/codeholder.h"
#include "../core/inst.h"
#include "../core/operand.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
class ConstPool;
class FuncFrame;
class FuncArgsAssignment;
// ============================================================================
// [asmjit::BaseEmitter]
// ============================================================================
//! Provides a base foundation to emit code - specialized by `Assembler` and
//! `BaseBuilder`.
class ASMJIT_VIRTAPI BaseEmitter {
public:
ASMJIT_BASE_CLASS(BaseEmitter)
//! See \ref EmitterType.
uint8_t _emitterType = 0;
//! See \ref BaseEmitter::EmitterFlags.
uint8_t _emitterFlags = 0;
//! Validation flags in case validation is used, see \ref InstAPI::ValidationFlags.
//!
//! \note Validation flags are specific to the emitter and are set up at
//! construction time and never changed afterwards.
uint8_t _validationFlags = 0;
//! Validation options, see \ref ValidationOptions.
uint8_t _validationOptions = 0;
//! Encoding options, see \ref EncodingOptions.
uint32_t _encodingOptions = 0;
//! Forced instruction options, combined with \ref _instOptions by \ref emit().
uint32_t _forcedInstOptions = BaseInst::kOptionReserved;
//! Internal private data used freely by any emitter.
uint32_t _privateData = 0;
//! CodeHolder the emitter is attached to.
CodeHolder* _code = nullptr;
//! Attached \ref Logger.
Logger* _logger = nullptr;
//! Attached \ref ErrorHandler.
ErrorHandler* _errorHandler = nullptr;
//! Describes the target environment, matches \ref CodeHolder::environment().
Environment _environment {};
//! Native GP register signature and signature related information.
RegInfo _gpRegInfo {};
//! Next instruction options (affects the next instruction).
uint32_t _instOptions = 0;
//! Extra register (op-mask {k} on AVX-512) (affects the next instruction).
RegOnly _extraReg {};
//! Inline comment of the next instruction (affects the next instruction).
const char* _inlineComment = nullptr;
//! Emitter type.
enum EmitterType : uint32_t {
//! Unknown or uninitialized.
kTypeNone = 0,
//! Emitter inherits from \ref BaseAssembler.
kTypeAssembler = 1,
//! Emitter inherits from \ref BaseBuilder.
kTypeBuilder = 2,
//! Emitter inherits from \ref BaseCompiler.
kTypeCompiler = 3,
//! Count of emitter types.
kTypeCount = 4
};
//! Emitter flags.
enum EmitterFlags : uint32_t {
//! The emitter has its own \ref Logger (not propagated from \ref CodeHolder).
kFlagOwnLogger = 0x10u,
//! The emitter has its own \ref ErrorHandler (not propagated from \ref CodeHolder).
kFlagOwnErrorHandler = 0x20u,
//! The emitter was finalized.
kFlagFinalized = 0x40u,
//! The emitter was destroyed.
kFlagDestroyed = 0x80u
};
//! Encoding options.
enum EncodingOptions : uint32_t {
//! Emit instructions that are optimized for size, if possible.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! When this option is set, the assembler will try, where possible, to rewrite
//! instructions into operation-equivalent instructions that take fewer bytes by
//! taking advantage of implicit zero extension. For example, instructions
//! like `mov r64, imm` and `and r64, imm` can be translated to `mov r32, imm`
//! and `and r32, imm` when the immediate constant is less than `2^31`.
kEncodingOptionOptimizeForSize = 0x00000001u,
//! Emit optimized code-alignment sequences.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! The default align sequence used by the X86 architecture is the one-byte (0x90)
//! opcode that is often shown by disassemblers as NOP. However, there are
//! more optimized align sequences for 2-11 bytes that may execute faster
//! on certain CPUs. If this feature is enabled, AsmJit will generate
//! specialized sequences for alignments of 2 to 11 bytes.
kEncodingOptionOptimizedAlign = 0x00000002u,
//! Emit jump-prediction hints.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! Jump prediction is usually based on the direction of the jump. If the
//! jump is backward it is usually predicted as taken; and if the jump is
//! forward it is usually predicted as not-taken. The reason is that loops
//! generally use backward jumps and conditions usually use forward jumps.
//! However this behavior can be overridden by using instruction prefixes.
//! If this option is enabled these hints will be emitted.
//!
//! This feature is disabled by default, because the only processor that
//! used to take prediction hints into consideration was the P4. Newer processors
//! implement heuristics for branch prediction and ignore static hints. This
//! means that this feature can only be used for annotation purposes.
kEncodingOptionPredictedJumps = 0x00000010u
};
#ifndef ASMJIT_NO_DEPRECATED
enum EmitterOptions : uint32_t {
kOptionOptimizedForSize = kEncodingOptionOptimizeForSize,
kOptionOptimizedAlign = kEncodingOptionOptimizedAlign,
kOptionPredictedJumps = kEncodingOptionPredictedJumps
};
#endif
//! Validation options are used to tell emitters to perform strict validation
//! of instructions passed to \ref emit().
//!
//! \ref BaseAssembler implementations perform by default only basic checks
//! that are necessary to identify all variations of an instruction so the
//! correct encoding can be selected. This is fine for production-ready code
//! as the assembler doesn't have to perform checks that would slow it down.
//! However, sometimes these checks are beneficial especially when the project
//! that uses AsmJit is in a development phase, in which mistakes happen often.
//! To make the experience of using AsmJit seamless it offers validation
//! features that can be controlled by `ValidationOptions`.
enum ValidationOptions : uint32_t {
//! Perform strict validation in \ref BaseAssembler::emit() implementations.
//!
//! This flag ensures that each instruction is checked before it's encoded
//! into a binary representation. This flag is only relevant for \ref
//! BaseAssembler implementations, but can be set in any other emitter type,
//! in that case if that emitter needs to create an assembler on its own,
//! for the purpose of \ref finalize() it would propagate this flag to such
//! assembler so all instructions passed to it are explicitly validated.
//!
//! Default: false.
kValidationOptionAssembler = 0x00000001u,
//! Perform strict validation in \ref BaseBuilder::emit() and \ref
//! BaseCompiler::emit() implementations.
//!
//! This flag ensures that each instruction is checked before an \ref
//! InstNode representing the instruction is created by Builder or Compiler.
//!
//! Default: false.
kValidationOptionIntermediate = 0x00000002u
};
//! \name Construction & Destruction
//! \{
ASMJIT_API explicit BaseEmitter(uint32_t emitterType) noexcept;
ASMJIT_API virtual ~BaseEmitter() noexcept;
//! \}
//! \name Cast
//! \{
template<typename T>
inline T* as() noexcept { return reinterpret_cast<T*>(this); }
template<typename T>
inline const T* as() const noexcept { return reinterpret_cast<const T*>(this); }
//! \}
//! \name Emitter Type & Flags
//! \{
//! Returns the type of this emitter, see `EmitterType`.
inline uint32_t emitterType() const noexcept { return _emitterType; }
//! Returns emitter flags, see `EmitterFlags`.
inline uint32_t emitterFlags() const noexcept { return _emitterFlags; }
//! Tests whether the emitter inherits from `BaseAssembler`.
inline bool isAssembler() const noexcept { return _emitterType == kTypeAssembler; }
//! Tests whether the emitter inherits from `BaseBuilder`.
//!
//! \note Both Builder and Compiler emitters would return `true`.
inline bool isBuilder() const noexcept { return _emitterType >= kTypeBuilder; }
//! Tests whether the emitter inherits from `BaseCompiler`.
inline bool isCompiler() const noexcept { return _emitterType == kTypeCompiler; }
//! Tests whether the emitter has the given `flag` enabled.
inline bool hasEmitterFlag(uint32_t flag) const noexcept { return (_emitterFlags & flag) != 0; }
//! Tests whether the emitter is finalized.
inline bool isFinalized() const noexcept { return hasEmitterFlag(kFlagFinalized); }
//! Tests whether the emitter is destroyed (only used during destruction).
inline bool isDestroyed() const noexcept { return hasEmitterFlag(kFlagDestroyed); }
inline void _addEmitterFlags(uint32_t flags) noexcept { _emitterFlags = uint8_t(_emitterFlags | flags); }
inline void _clearEmitterFlags(uint32_t flags) noexcept { _emitterFlags = uint8_t(_emitterFlags & ~flags); }
//! \}
//! \name Target Information
//! \{
//! Returns the CodeHolder this emitter is attached to.
inline CodeHolder* code() const noexcept { return _code; }
//! Returns the target environment, see \ref Environment.
//!
//! The returned \ref Environment reference matches \ref CodeHolder::environment().
inline const Environment& environment() const noexcept { return _environment; }
//! Tests whether the target architecture is 32-bit.
inline bool is32Bit() const noexcept { return environment().is32Bit(); }
//! Tests whether the target architecture is 64-bit.
inline bool is64Bit() const noexcept { return environment().is64Bit(); }
//! Returns the target architecture type.
inline uint32_t arch() const noexcept { return environment().arch(); }
//! Returns the target architecture sub-type.
inline uint32_t subArch() const noexcept { return environment().subArch(); }
//! Returns the target architecture's GP register size (4 or 8 bytes).
inline uint32_t registerSize() const noexcept { return environment().registerSize(); }
//! \}
//! \name Initialization & Finalization
//! \{
//! Tests whether the emitter is initialized (i.e. attached to \ref CodeHolder).
inline bool isInitialized() const noexcept { return _code != nullptr; }
//! Finalizes this emitter.
//!
//! Materializes the content of the emitter by serializing it to the attached
//! \ref CodeHolder through an architecture specific \ref BaseAssembler. This
//! function won't do anything if the emitter inherits from \ref BaseAssembler
//! as assemblers emit directly to a \ref CodeBuffer held by \ref CodeHolder.
//! However, if this is an emitter that inherits from \ref BaseBuilder or \ref
//! BaseCompiler then these emitters need the materialization phase as they
//! store their content in a representation not visible to \ref CodeHolder.
ASMJIT_API virtual Error finalize();
//! \}
//! \name Logging
//! \{
//! Tests whether the emitter has a logger.
inline bool hasLogger() const noexcept { return _logger != nullptr; }
//! Tests whether the emitter has its own logger.
//!
//! Own logger means that it overrides the possible logger that may be used
//! by \ref CodeHolder this emitter is attached to.
inline bool hasOwnLogger() const noexcept { return hasEmitterFlag(kFlagOwnLogger); }
//! Returns the logger this emitter uses.
//!
//! The returned logger is either the emitter's own logger or the logger
//! used by the \ref CodeHolder this emitter is attached to.
inline Logger* logger() const noexcept { return _logger; }
//! Sets or resets the logger of the emitter.
//!
//! If the `logger` argument is non-null then the logger will be considered
//! emitter's own logger, see \ref hasOwnLogger() for more details. If the
//! given `logger` is null then the emitter will automatically use logger
//! that is attached to the \ref CodeHolder this emitter is attached to.
ASMJIT_API void setLogger(Logger* logger) noexcept;
//! Resets the logger of this emitter.
//!
//! The emitter will fall back to using the logger attached to the \ref CodeHolder
//! this emitter is attached to, or no logger at all if the \ref CodeHolder doesn't
//! have one.
inline void resetLogger() noexcept { return setLogger(nullptr); }
//! \}
//! \name Error Handling
//! \{
//! Tests whether the emitter has an error handler attached.
inline bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
//! Tests whether the emitter has its own error handler.
//!
//! Own error handler means that it overrides the possible error handler that
//! may be used by \ref CodeHolder this emitter is attached to.
inline bool hasOwnErrorHandler() const noexcept { return hasEmitterFlag(kFlagOwnErrorHandler); }
//! Returns the error handler this emitter uses.
//!
//! The returned error handler is either the emitter's own error handler or
//! the error handler used by the \ref CodeHolder this emitter is attached to.
inline ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
//! Sets or resets the error handler of the emitter.
ASMJIT_API void setErrorHandler(ErrorHandler* errorHandler) noexcept;
//! Resets the error handler.
inline void resetErrorHandler() noexcept { setErrorHandler(nullptr); }
//! Handles the given error in the following way:
//! 1. If the emitter has \ref ErrorHandler attached, it calls its
//! \ref ErrorHandler::handleError() member function first, and
//! then returns the error. The `handleError()` function may throw.
//! 2. if the emitter doesn't have \ref ErrorHandler, the error is
//! simply returned.
ASMJIT_API Error reportError(Error err, const char* message = nullptr);
//! \}
//! \name Encoding Options
//! \{
//! Returns encoding options, see \ref EncodingOptions.
inline uint32_t encodingOptions() const noexcept { return _encodingOptions; }
//! Tests whether the encoding `option` is set.
inline bool hasEncodingOption(uint32_t option) const noexcept { return (_encodingOptions & option) != 0; }
//! Enables the given encoding `options`, see \ref EncodingOptions.
inline void addEncodingOptions(uint32_t options) noexcept { _encodingOptions |= options; }
//! Disables the given encoding `options`, see \ref EncodingOptions.
inline void clearEncodingOptions(uint32_t options) noexcept { _encodingOptions &= ~options; }
//! \}
//! \name Validation Options
//! \{
//! Returns the emitter's validation options, see \ref ValidationOptions.
inline uint32_t validationOptions() const noexcept {
return _validationOptions;
}
//! Tests whether the given `option` is present in validation options.
inline bool hasValidationOption(uint32_t option) const noexcept {
return (_validationOptions & option) != 0;
}
//! Activates the given validation `options`, see \ref ValidationOptions.
//!
//! This function is used to activate explicit validation options that will
//! be then used by all emitter implementations. There are in general two
//! possibilities:
//!
//! - Architecture specific assembler is used. In this case a
//! \ref kValidationOptionAssembler can be used to turn on explicit
//! validation that will be used before an instruction is emitted.
//! This means that internally an extra step will be performed to
//! make sure that the instruction is correct. This is needed, because
//! by default assemblers prefer speed over strictness.
//!
//! This option should be used in debug builds as it's pretty expensive.
//!
//! - Architecture specific builder or compiler is used. In this case
//! the user can turn on \ref kValidationOptionIntermediate option
//! that adds explicit validation step before the Builder or Compiler
//! creates an \ref InstNode to represent an emitted instruction. Error
//! will be returned if the instruction is ill-formed. In addition,
//! also \ref kValidationOptionAssembler can be used, which would not be
//! consumed by Builder / Compiler directly, but it would be propagated
//! to an architecture specific \ref BaseAssembler implementation it
//! creates during \ref BaseEmitter::finalize().
ASMJIT_API void addValidationOptions(uint32_t options) noexcept;
//! Deactivates the given validation `options`.
//!
//! See \ref addValidationOptions() and \ref ValidationOptions for more details.
ASMJIT_API void clearValidationOptions(uint32_t options) noexcept;
//! \}
//! \name Instruction Options
//! \{
//! Returns forced instruction options.
//!
//! Forced instruction options are merged with next instruction options before
//! the instruction is encoded. These options have some bits reserved that are
//! used by error handling, logging, and instruction validation purposes. Other
//! options are globals that affect each instruction.
inline uint32_t forcedInstOptions() const noexcept { return _forcedInstOptions; }
//! Returns options of the next instruction.
inline uint32_t instOptions() const noexcept { return _instOptions; }
//! Sets options of the next instruction.
inline void setInstOptions(uint32_t options) noexcept { _instOptions = options; }
//! Adds options of the next instruction.
inline void addInstOptions(uint32_t options) noexcept { _instOptions |= options; }
//! Resets options of the next instruction.
inline void resetInstOptions() noexcept { _instOptions = 0; }
//! Tests whether the extra register operand is valid.
inline bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
//! Returns an extra operand that will be used by the next instruction (architecture specific).
inline const RegOnly& extraReg() const noexcept { return _extraReg; }
//! Sets an extra operand that will be used by the next instruction (architecture specific).
inline void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); }
//! Sets an extra operand that will be used by the next instruction (architecture specific).
inline void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
//! Resets an extra operand that will be used by the next instruction (architecture specific).
inline void resetExtraReg() noexcept { _extraReg.reset(); }
//! Returns comment/annotation of the next instruction.
inline const char* inlineComment() const noexcept { return _inlineComment; }
//! Sets comment/annotation of the next instruction.
//!
//! \note This string is set back to null by `_emit()`, but until that it has
//! to remain valid as the Emitter is not required to make a copy of it (and
//! it would be slow to do that for each instruction).
inline void setInlineComment(const char* s) noexcept { _inlineComment = s; }
//! Resets the comment/annotation to nullptr.
inline void resetInlineComment() noexcept { _inlineComment = nullptr; }
//! \}
//! \name Sections
//! \{
virtual Error section(Section* section) = 0;
//! \}
//! \name Labels
//! \{
//! Creates a new label.
virtual Label newLabel() = 0;
//! Creates a new named label.
virtual Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) = 0;
//! Creates a new external label.
inline Label newExternalLabel(const char* name, size_t nameSize = SIZE_MAX) {
return newNamedLabel(name, nameSize, Label::kTypeExternal);
}
//! Returns `Label` by `name`.
//!
//! Returns invalid Label in case that the name is invalid or label was not found.
//!
//! \note This function doesn't trigger ErrorHandler in case the name is invalid
//! or no such label exist. You must always check the validity of the `Label` returned.
ASMJIT_API Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
//! Binds the `label` to the current position of the current section.
//!
//! \note Attempt to bind the same label multiple times will return an error.
virtual Error bind(const Label& label) = 0;
//! Tests whether the label `id` is valid (i.e. registered).
ASMJIT_API bool isLabelValid(uint32_t labelId) const noexcept;
//! Tests whether the `label` is valid (i.e. registered).
inline bool isLabelValid(const Label& label) const noexcept { return isLabelValid(label.id()); }
//! \}
//! \name Emit
//! \{
// NOTE: These `emit()` helpers are designed to address code bloat generated by
// C++ compilers when calling a function that has many arguments. Each parameter
// to `_emit()` requires some code to pass it, which means that if we defaulted to
// 5 operand arguments in `_emit()` plus `instId`, the C++ compiler would have to
// generate a virtual function call with 5 parameters and an additional `this`
// argument, which is quite a lot. Since most instructions have 2 to 3 operands,
// it's better to introduce helpers that pass from 0 to 6 operands and thereby
// reduce the size of each emit(...) call.
//! Emits an instruction (internal).
ASMJIT_API Error _emitI(uint32_t instId);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5);
//! Emits an instruction `instId` with the given `operands`.
template<typename... Args>
ASMJIT_INLINE Error emit(uint32_t instId, Args&&... operands) {
return _emitI(instId, Support::ForwardOp<Args>::forward(operands)...);
}
inline Error emitOpArray(uint32_t instId, const Operand_* operands, size_t opCount) {
return _emitOpArray(instId, operands, opCount);
}
inline Error emitInst(const BaseInst& inst, const Operand_* operands, size_t opCount) {
setInstOptions(inst.options());
setExtraReg(inst.extraReg());
return _emitOpArray(inst.id(), operands, opCount);
}
//! \cond INTERNAL
//! Emits an instruction - all 6 operands must be defined.
virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* oExt) = 0;
//! Emits instruction having operands stored in array.
ASMJIT_API virtual Error _emitOpArray(uint32_t instId, const Operand_* operands, size_t opCount);
//! \endcond
//! \}
//! \name Emit Utilities
//! \{
ASMJIT_API Error emitProlog(const FuncFrame& frame);
ASMJIT_API Error emitEpilog(const FuncFrame& frame);
ASMJIT_API Error emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args);
//! \}
//! \name Align
//! \{
//! Aligns the current CodeBuffer position to the `alignment` specified.
//!
//! The sequence that is used to fill the gap between the aligned location
//! and the current location depends on the align `mode`, see \ref AlignMode.
virtual Error align(uint32_t alignMode, uint32_t alignment) = 0;
//! \}
//! \name Embed
//! \{
//! Embeds raw data into the \ref CodeBuffer.
virtual Error embed(const void* data, size_t dataSize) = 0;
//! Embeds a typed data array.
//!
//! This is the most flexible function for embedding data as it allows to:
//! - Assign a `typeId` to the data, so the emitter knows the type of
//! items stored in `data`. Binary data should use \ref Type::kIdU8.
//! - Repeat the given data `repeatCount` times, so the data can be used
//! as a fill pattern for example, or as a pattern used by SIMD instructions.
virtual Error embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount = 1) = 0;
//! Embeds int8_t `value` repeated by `repeatCount`.
inline Error embedInt8(int8_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI8, &value, 1, repeatCount); }
//! Embeds uint8_t `value` repeated by `repeatCount`.
inline Error embedUInt8(uint8_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU8, &value, 1, repeatCount); }
//! Embeds int16_t `value` repeated by `repeatCount`.
inline Error embedInt16(int16_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI16, &value, 1, repeatCount); }
//! Embeds uint16_t `value` repeated by `repeatCount`.
inline Error embedUInt16(uint16_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU16, &value, 1, repeatCount); }
//! Embeds int32_t `value` repeated by `repeatCount`.
inline Error embedInt32(int32_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI32, &value, 1, repeatCount); }
//! Embeds uint32_t `value` repeated by `repeatCount`.
inline Error embedUInt32(uint32_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU32, &value, 1, repeatCount); }
//! Embeds int64_t `value` repeated by `repeatCount`.
inline Error embedInt64(int64_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI64, &value, 1, repeatCount); }
//! Embeds uint64_t `value` repeated by `repeatCount`.
inline Error embedUInt64(uint64_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU64, &value, 1, repeatCount); }
//! Embeds a floating point `value` repeated by `repeatCount`.
inline Error embedFloat(float value, size_t repeatCount = 1) { return embedDataArray(Type::kIdF32, &value, 1, repeatCount); }
//! Embeds a floating point `value` repeated by `repeatCount`.
inline Error embedDouble(double value, size_t repeatCount = 1) { return embedDataArray(Type::IdOfT<double>::kTypeId, &value, 1, repeatCount); }
//! Embeds a constant pool at the current offset by performing the following:
//! 1. Aligns the current position by using kAlignData to the minimum `pool` alignment.
//! 2. Binds the ConstPool label so it's bound to an aligned location.
//! 3. Emits ConstPool content.
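//!
//! A hedged sketch of typical usage (assuming `a` is an attached emitter and
//! that the pool is backed by a temporary \ref Zone, matching the ConstPool
//! constructor that takes a Zone pointer):
//!
//! ```
//! Zone zone(1024);
//! ConstPool pool(&zone);
//!
//! size_t offset; // Receives the offset of the constant within the pool.
//! uint64_t c = 0x0102030405060708u;
//! pool.add(&c, sizeof(c), offset);
//!
//! Label poolLabel = a.newLabel();
//! a.embedConstPool(poolLabel, pool); // Aligns, binds the label, emits data.
//! ```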
virtual Error embedConstPool(const Label& label, const ConstPool& pool) = 0;
//! Embeds an absolute `label` address as data.
//!
//! The `dataSize` is an optional argument that can be used to specify the
//! size of the address data. If it's zero (default) the address size is
//! deduced from the target architecture (either 4 or 8 bytes).
virtual Error embedLabel(const Label& label, size_t dataSize = 0) = 0;
//! Embeds a delta (distance) between the `label` and `base` calculating it
//! as `label - base`. This function was designed to make it easier to embed
//! lookup tables where each index is a relative distance of two labels.
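//!
//! A minimal sketch of a label-relative lookup table (assuming `a` is an
//! attached emitter and `L_Table`, `L_Case0`, and `L_Case1` are labels
//! created via `newLabel()` and bound elsewhere in the code):
//!
//! ```
//! a.bind(L_Table);
//! a.embedLabelDelta(L_Case0, L_Table, 4); // 32-bit offset of case 0.
//! a.embedLabelDelta(L_Case1, L_Table, 4); // 32-bit offset of case 1.
//! ```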
virtual Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0) = 0;
//! \}
//! \name Comment
//! \{
//! Emits a comment stored in `data` with an optional `size` parameter.
virtual Error comment(const char* data, size_t size = SIZE_MAX) = 0;
//! Emits a formatted comment specified by `fmt` and a variable number of arguments.
ASMJIT_API Error commentf(const char* fmt, ...);
//! Emits a formatted comment specified by `fmt` and `ap`.
ASMJIT_API Error commentv(const char* fmt, va_list ap);
//! \}
//! \name Events
//! \{
//! Called after the emitter was attached to `CodeHolder`.
virtual Error onAttach(CodeHolder* code) noexcept = 0;
//! Called after the emitter was detached from `CodeHolder`.
virtual Error onDetach(CodeHolder* code) noexcept = 0;
//! Called when \ref CodeHolder has updated an important setting, which
//! involves the following:
//!
//! - \ref Logger has been changed (\ref CodeHolder::setLogger() has been
//! called).
//! - \ref ErrorHandler has been changed (\ref CodeHolder::setErrorHandler()
//! has been called).
//!
//! This function ensures that the settings are properly propagated from
//! \ref CodeHolder to the emitter.
//!
//! \note This function is virtual and can be overridden, however, if you
//! do so, always call \ref BaseEmitter::onSettingsUpdated() within your
//! own implementation to ensure that the emitter is in a consistent state.
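//!
//! A hedged sketch of such an override (the derived emitter type shown here
//! is hypothetical and omits the other pure-virtual overrides it would need):
//!
//! ```
//! class MyEmitter : public BaseAssembler {
//! public:
//! // ... other required overrides omitted in this sketch ...
//! void onSettingsUpdated() noexcept override {
//! BaseEmitter::onSettingsUpdated(); // Keep base bookkeeping consistent.
//! // Refresh any locally cached logger / error handler state here.
//! }
//! };
//! ```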
ASMJIT_API virtual void onSettingsUpdated() noexcept;
//! \}
#ifndef ASMJIT_NO_DEPRECATED
ASMJIT_DEPRECATED("Use environment() instead")
inline CodeInfo codeInfo() const noexcept {
return CodeInfo(_environment, _code ? _code->baseAddress() : Globals::kNoBaseAddress);
}
ASMJIT_DEPRECATED("Use arch() instead")
inline uint32_t archId() const noexcept { return arch(); }
ASMJIT_DEPRECATED("Use registerSize() instead")
inline uint32_t gpSize() const noexcept { return registerSize(); }
ASMJIT_DEPRECATED("Use encodingOptions() instead")
inline uint32_t emitterOptions() const noexcept { return encodingOptions(); }
ASMJIT_DEPRECATED("Use addEncodingOptions() instead")
inline void addEmitterOptions(uint32_t options) noexcept { addEncodingOptions(options); }
ASMJIT_DEPRECATED("Use clearEncodingOptions() instead")
inline void clearEmitterOptions(uint32_t options) noexcept { clearEncodingOptions(options); }
ASMJIT_DEPRECATED("Use forcedInstOptions() instead")
inline uint32_t globalInstOptions() const noexcept { return forcedInstOptions(); }
#endif // !ASMJIT_NO_DEPRECATED
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_EMITTER_H_INCLUDED

View File

@ -0,0 +1,150 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/assembler.h"
#include "../core/emitterutils_p.h"
#include "../core/formatter.h"
#include "../core/logger.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::EmitterUtils]
// ============================================================================
namespace EmitterUtils {
#ifndef ASMJIT_NO_LOGGING
Error formatLine(String& sb, const uint8_t* binData, size_t binSize, size_t dispSize, size_t immSize, const char* comment) noexcept {
size_t currentSize = sb.size();
size_t commentSize = comment ? Support::strLen(comment, Globals::kMaxCommentSize) : 0;
ASMJIT_ASSERT(binSize >= dispSize);
const size_t kNoBinSize = SIZE_MAX;
if ((binSize != 0 && binSize != kNoBinSize) || commentSize) {
size_t align = kMaxInstLineSize;
char sep = ';';
for (size_t i = (binSize == kNoBinSize); i < 2; i++) {
size_t begin = sb.size();
ASMJIT_PROPAGATE(sb.padEnd(align));
if (sep) {
ASMJIT_PROPAGATE(sb.append(sep));
ASMJIT_PROPAGATE(sb.append(' '));
}
// Append binary data or comment.
if (i == 0) {
ASMJIT_PROPAGATE(sb.appendHex(binData, binSize - dispSize - immSize));
ASMJIT_PROPAGATE(sb.appendChars('.', dispSize * 2));
ASMJIT_PROPAGATE(sb.appendHex(binData + binSize - immSize, immSize));
if (commentSize == 0) break;
}
else {
ASMJIT_PROPAGATE(sb.append(comment, commentSize));
}
currentSize += sb.size() - begin;
align += kMaxBinarySize;
sep = '|';
}
}
return sb.append('\n');
}
void logLabelBound(BaseAssembler* self, const Label& label) noexcept {
Logger* logger = self->logger();
StringTmp<512> sb;
size_t binSize = logger->hasFlag(FormatOptions::kFlagMachineCode) ? size_t(0) : SIZE_MAX;
sb.appendChars(' ', logger->indentation(FormatOptions::kIndentationLabel));
Formatter::formatLabel(sb, logger->flags(), self, label.id());
sb.append(':');
EmitterUtils::formatLine(sb, nullptr, binSize, 0, 0, self->_inlineComment);
logger->log(sb.data(), sb.size());
}
void logInstructionEmitted(
BaseAssembler* self,
uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
uint32_t relSize, uint32_t immSize, uint8_t* afterCursor) {
Logger* logger = self->logger();
ASMJIT_ASSERT(logger != nullptr);
StringTmp<256> sb;
uint32_t flags = logger->flags();
uint8_t* beforeCursor = self->bufferPtr();
intptr_t emittedSize = (intptr_t)(afterCursor - beforeCursor);
Operand_ opArray[Globals::kMaxOpCount];
EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
sb.appendChars(' ', logger->indentation(FormatOptions::kIndentationCode));
Formatter::formatInstruction(sb, flags, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
if ((flags & FormatOptions::kFlagMachineCode) != 0)
EmitterUtils::formatLine(sb, self->bufferPtr(), size_t(emittedSize), relSize, immSize, self->inlineComment());
else
EmitterUtils::formatLine(sb, nullptr, SIZE_MAX, 0, 0, self->inlineComment());
logger->log(sb);
}
Error logInstructionFailed(
BaseAssembler* self,
Error err,
uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
StringTmp<256> sb;
sb.append(DebugUtils::errorAsString(err));
sb.append(": ");
Operand_ opArray[Globals::kMaxOpCount];
EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
Formatter::formatInstruction(sb, 0, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
if (self->inlineComment()) {
sb.append(" ; ");
sb.append(self->inlineComment());
}
self->resetInstOptions();
self->resetExtraReg();
self->resetInlineComment();
return self->reportError(err, sb.data());
}
#endif
} // {EmitterUtils}
ASMJIT_END_NAMESPACE

View File

@ -0,0 +1,109 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
#define ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
#include "../core/emitter.h"
#include "../core/operand.h"
ASMJIT_BEGIN_NAMESPACE
class BaseAssembler;
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::EmitterUtils]
// ============================================================================
namespace EmitterUtils {
static const Operand_ noExt[3] {};
enum kOpIndex {
kOp3 = 0,
kOp4 = 1,
kOp5 = 2
};
static ASMJIT_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
uint32_t opCount = 0;
if (opExt[kOp3].isNone()) {
if (!o0.isNone()) opCount = 1;
if (!o1.isNone()) opCount = 2;
if (!o2.isNone()) opCount = 3;
}
else {
opCount = 4;
if (!opExt[kOp4].isNone()) {
opCount = 5 + uint32_t(!opExt[kOp5].isNone());
}
}
return opCount;
}
static ASMJIT_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
dst[0].copyFrom(o0);
dst[1].copyFrom(o1);
dst[2].copyFrom(o2);
dst[3].copyFrom(opExt[kOp3]);
dst[4].copyFrom(opExt[kOp4]);
dst[5].copyFrom(opExt[kOp5]);
}
#ifndef ASMJIT_NO_LOGGING
enum : uint32_t {
// Has to be big to be able to hold all metadata compiler can assign to a
// single instruction.
kMaxInstLineSize = 44,
kMaxBinarySize = 26
};
Error formatLine(String& sb, const uint8_t* binData, size_t binSize, size_t dispSize, size_t immSize, const char* comment) noexcept;
void logLabelBound(BaseAssembler* self, const Label& label) noexcept;
void logInstructionEmitted(
BaseAssembler* self,
uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
uint32_t relSize, uint32_t immSize, uint8_t* afterCursor);
Error logInstructionFailed(
BaseAssembler* self,
Error err, uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt);
#endif
}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED

View File

@ -0,0 +1,64 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/environment.h"
ASMJIT_BEGIN_NAMESPACE
// X86 Target
// ----------
//
// - 32-bit - Linux, OSX, BSD, and apparently also Haiku guarantee 16-byte
// stack alignment. Other operating systems are assumed to have
// 4-byte alignment by default for safety reasons.
// - 64-bit - stack must be aligned to 16 bytes.
//
// ARM Target
// ----------
//
// - 32-bit - Stack must be aligned to 8 bytes.
// - 64-bit - Stack must be aligned to 16 bytes (hardware requirement).
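//
// A minimal sketch of querying this at runtime for the host environment
// (hostEnvironment() and registerSize() are declared in environment.h):
//
//   uint32_t stackAlign = hostEnvironment().stackAlignment();
//   uint32_t regSize = hostEnvironment().registerSize();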
uint32_t Environment::stackAlignment() const noexcept {
if (is64Bit()) {
// Assume 16-byte alignment on any 64-bit target.
return 16;
}
else {
// The following platforms use 16-byte alignment in 32-bit mode.
if (isPlatformLinux() ||
isPlatformBSD() ||
isPlatformApple() ||
isPlatformHaiku()) {
return 16u;
}
if (isFamilyARM())
return 8;
// Bail to 4-byte alignment if we don't know.
return 4;
}
}
ASMJIT_END_NAMESPACE

View File

@ -0,0 +1,612 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
#define ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
#include "../core/globals.h"
#if defined(__APPLE__)
#include <TargetConditionals.h>
#endif
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::Environment]
// ============================================================================
//! Represents an environment, which is usually related to a \ref Target.
//!
//! Environment usually has an 'arch-subarch-vendor-os-abi' format, which is
//! sometimes called a "Triple" (historically it used to have only 3 parts) or
//! "Tuple", which is a convention used by Debian Linux.
//!
//! AsmJit doesn't support all possible combinations of architectures and ABIs,
//! however, it models the environment similarly to other compilers for future
//! extensibility.
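//!
//! A minimal sketch of constructing and querying an environment explicitly
//! (64-bit X86 on Windows is chosen here purely as an example):
//!
//! ```
//! Environment env(Environment::kArchX64);
//! env.setPlatform(Environment::kPlatformWindows);
//!
//! bool is64 = env.is64Bit(); // true
//! uint32_t regSize = env.registerSize(); // 8 bytes
//! ```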
class Environment {
public:
//! Architecture type, see \ref Arch.
uint8_t _arch;
//! Sub-architecture type, see \ref SubArch.
uint8_t _subArch;
//! Vendor type, see \ref Vendor.
uint8_t _vendor;
//! Platform type, see \ref Platform.
uint8_t _platform;
//! ABI type, see \ref Abi.
uint8_t _abi;
//! Object format, see \ref Format.
uint8_t _format;
//! Reserved for future use, must be zero.
uint16_t _reserved;
//! Architecture.
enum Arch : uint32_t {
//! Unknown or uninitialized architecture.
kArchUnknown = 0,
//! Mask used by 32-bit architectures (odd are 32-bit, even are 64-bit).
kArch32BitMask = 0x01,
//! Mask used by big-endian architectures.
kArchBigEndianMask = 0x80u,
//! 32-bit X86 architecture.
kArchX86 = 1,
//! 64-bit X86 architecture also known as X86_64 and AMD64.
kArchX64 = 2,
//! 32-bit RISC-V architecture.
kArchRISCV32 = 3,
//! 64-bit RISC-V architecture.
kArchRISCV64 = 4,
//! 32-bit ARM architecture (little endian).
kArchARM = 5,
//! 32-bit ARM architecture (big endian).
kArchARM_BE = kArchARM | kArchBigEndianMask,
//! 64-bit ARM architecture (little endian).
kArchAArch64 = 6,
//! 64-bit ARM architecture (big endian).
kArchAArch64_BE = kArchAArch64 | kArchBigEndianMask,
//! 32-bit ARM in Thumb mode (little endian).
kArchThumb = 7,
//! 32-bit ARM in Thumb mode (big endian).
kArchThumb_BE = kArchThumb | kArchBigEndianMask,
// 8 is not used, even numbers are 64-bit architectures.
//! 32-bit MIPS architecture (little endian).
kArchMIPS32_LE = 9,
//! 32-bit MIPS architecture (big endian).
kArchMIPS32_BE = kArchMIPS32_LE | kArchBigEndianMask,
//! 64-bit MIPS architecture (little endian).
kArchMIPS64_LE = 10,
//! 64-bit MIPS architecture (big endian).
kArchMIPS64_BE = kArchMIPS64_LE | kArchBigEndianMask,
//! Count of architectures.
kArchCount = 11
};
//! Sub-architecture.
enum SubArch : uint32_t {
//! Unknown or uninitialized architecture sub-type.
kSubArchUnknown = 0,
//! Count of sub-architectures.
kSubArchCount
};
//! Vendor.
//!
//! \note AsmJit doesn't use vendor information at the moment. It's provided
//! for future use, if required.
enum Vendor : uint32_t {
//! Unknown or uninitialized vendor.
kVendorUnknown = 0,
//! Count of vendor identifiers.
kVendorCount
};
//! Platform / OS.
enum Platform : uint32_t {
//! Unknown or uninitialized platform.
kPlatformUnknown = 0,
//! Windows OS.
kPlatformWindows,
//! Other platform, most likely POSIX based.
kPlatformOther,
//! Linux OS.
kPlatformLinux,
//! GNU/Hurd OS.
kPlatformHurd,
//! FreeBSD OS.
kPlatformFreeBSD,
//! OpenBSD OS.
kPlatformOpenBSD,
//! NetBSD OS.
kPlatformNetBSD,
//! DragonFly BSD OS.
kPlatformDragonFlyBSD,
//! Haiku OS.
kPlatformHaiku,
//! Apple OSX.
kPlatformOSX,
//! Apple iOS.
kPlatformIOS,
//! Apple TVOS.
kPlatformTVOS,
//! Apple WatchOS.
kPlatformWatchOS,
//! Emscripten platform.
kPlatformEmscripten,
//! Count of platform identifiers.
kPlatformCount
};
//! ABI.
enum Abi : uint32_t {
//! Unknown or uninitialized environment.
kAbiUnknown = 0,
//! Microsoft ABI.
kAbiMSVC,
//! GNU ABI.
kAbiGNU,
//! Android Environment / ABI.
kAbiAndroid,
//! Cygwin ABI.
kAbiCygwin,
//! Count of known ABI types.
kAbiCount
};
//! Object format.
//!
//! \note AsmJit doesn't really use anything except \ref kFormatUnknown and
//! \ref kFormatJIT at the moment. Object file formats are provided for
//! future extensibility and a possibility to generate object files at some
//! point.
enum Format : uint32_t {
//! Unknown or uninitialized object format.
kFormatUnknown = 0,
//! JIT code generation object, most likely \ref JitRuntime or a custom
//! \ref Target implementation.
kFormatJIT,
//! Executable and linkable format (ELF).
kFormatELF,
//! Common object file format.
kFormatCOFF,
//! Extended COFF object format.
kFormatXCOFF,
//! Mach object file format.
kFormatMachO,
//! Count of object format types.
kFormatCount
};
//! \name Environment Detection
//! \{
#ifdef _DOXYGEN
//! Architecture detected at compile-time (architecture of the host).
static constexpr Arch kArchHost = DETECTED_AT_COMPILE_TIME;
//! Sub-architecture detected at compile-time (sub-architecture of the host).
static constexpr SubArch kSubArchHost = DETECTED_AT_COMPILE_TIME;
//! Vendor detected at compile-time (vendor of the host).
static constexpr Vendor kVendorHost = DETECTED_AT_COMPILE_TIME;
//! Platform detected at compile-time (platform of the host).
static constexpr Platform kPlatformHost = DETECTED_AT_COMPILE_TIME;
//! ABI detected at compile-time (ABI of the host).
static constexpr Abi kAbiHost = DETECTED_AT_COMPILE_TIME;
#else
static constexpr Arch kArchHost =
ASMJIT_ARCH_X86 == 32 ? kArchX86 :
ASMJIT_ARCH_X86 == 64 ? kArchX64 :
ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_LE ? kArchARM :
ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_BE ? kArchARM_BE :
ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_LE ? kArchAArch64 :
ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_BE ? kArchAArch64_BE :
ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_LE ? kArchMIPS32_LE :
ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_BE ? kArchMIPS32_BE :
ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_LE ? kArchMIPS64_LE :
ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_BE ? kArchMIPS64_BE :
kArchUnknown;
static constexpr SubArch kSubArchHost =
kSubArchUnknown;
static constexpr Vendor kVendorHost =
kVendorUnknown;
static constexpr Platform kPlatformHost =
#if defined(__EMSCRIPTEN__)
kPlatformEmscripten
#elif defined(_WIN32)
kPlatformWindows
#elif defined(__linux__)
kPlatformLinux
#elif defined(__gnu_hurd__)
kPlatformHurd
#elif defined(__FreeBSD__)
kPlatformFreeBSD
#elif defined(__OpenBSD__)
kPlatformOpenBSD
#elif defined(__NetBSD__)
kPlatformNetBSD
#elif defined(__DragonFly__)
kPlatformDragonFlyBSD
#elif defined(__HAIKU__)
kPlatformHaiku
#elif defined(__APPLE__) && TARGET_OS_OSX
kPlatformOSX
#elif defined(__APPLE__) && TARGET_OS_TV
kPlatformTVOS
#elif defined(__APPLE__) && TARGET_OS_WATCH
kPlatformWatchOS
#elif defined(__APPLE__) && TARGET_OS_IPHONE
kPlatformIOS
#else
kPlatformOther
#endif
;
static constexpr Abi kAbiHost =
#if defined(_MSC_VER)
kAbiMSVC
#elif defined(__CYGWIN__)
kAbiCygwin
#elif defined(__MINGW32__) || defined(__GLIBC__)
kAbiGNU
#elif defined(__ANDROID__)
kAbiAndroid
#else
kAbiUnknown
#endif
;
#endif
//! \}
//! \name Construction / Destruction
//! \{
inline Environment() noexcept :
_arch(uint8_t(kArchUnknown)),
_subArch(uint8_t(kSubArchUnknown)),
_vendor(uint8_t(kVendorUnknown)),
_platform(uint8_t(kPlatformUnknown)),
_abi(uint8_t(kAbiUnknown)),
_format(uint8_t(kFormatUnknown)),
_reserved(0) {}
inline Environment(const Environment& other) noexcept = default;
inline explicit Environment(uint32_t arch,
uint32_t subArch = kSubArchUnknown,
uint32_t vendor = kVendorUnknown,
uint32_t platform = kPlatformUnknown,
uint32_t abi = kAbiUnknown,
uint32_t format = kFormatUnknown) noexcept {
init(arch, subArch, vendor, platform, abi, format);
}
//! \}
//! \name Overloaded Operators
//! \{
inline Environment& operator=(const Environment& other) noexcept = default;
inline bool operator==(const Environment& other) const noexcept { return equals(other); }
inline bool operator!=(const Environment& other) const noexcept { return !equals(other); }
//! \}
//! \name Accessors
//! \{
//! Tests whether the environment is not set up.
//!
//! Returns true if all members are zero, and thus unknown.
inline bool empty() const noexcept {
// Unfortunately compilers won't optimize this well if the fields are checked one by one...
return _packed() == 0;
}
//! Tests whether the environment is initialized, which means it must have
//! a valid architecture.
inline bool isInitialized() const noexcept {
return _arch != kArchUnknown;
}
inline uint64_t _packed() const noexcept {
uint64_t x;
memcpy(&x, this, 8);
return x;
}
//! Resets all members of the environment to zero / unknown.
inline void reset() noexcept {
_arch = uint8_t(kArchUnknown);
_subArch = uint8_t(kSubArchUnknown);
_vendor = uint8_t(kVendorUnknown);
_platform = uint8_t(kPlatformUnknown);
_abi = uint8_t(kAbiUnknown);
_format = uint8_t(kFormatUnknown);
_reserved = 0;
}
inline bool equals(const Environment& other) const noexcept {
return _packed() == other._packed();
}
//! Returns the architecture, see \ref Arch.
inline uint32_t arch() const noexcept { return _arch; }
//! Returns the sub-architecture, see \ref SubArch.
inline uint32_t subArch() const noexcept { return _subArch; }
//! Returns vendor, see \ref Vendor.
inline uint32_t vendor() const noexcept { return _vendor; }
//! Returns target's platform or operating system, see \ref Platform.
inline uint32_t platform() const noexcept { return _platform; }
//! Returns target's ABI, see \ref Abi.
inline uint32_t abi() const noexcept { return _abi; }
//! Returns target's object format, see \ref Format.
inline uint32_t format() const noexcept { return _format; }
inline void init(uint32_t arch,
uint32_t subArch = kSubArchUnknown,
uint32_t vendor = kVendorUnknown,
uint32_t platform = kPlatformUnknown,
uint32_t abi = kAbiUnknown,
uint32_t format = kFormatUnknown) noexcept {
_arch = uint8_t(arch);
_subArch = uint8_t(subArch);
_vendor = uint8_t(vendor);
_platform = uint8_t(platform);
_abi = uint8_t(abi);
_format = uint8_t(format);
_reserved = 0;
}
inline bool isArchX86() const noexcept { return _arch == kArchX86; }
inline bool isArchX64() const noexcept { return _arch == kArchX64; }
inline bool isArchRISCV32() const noexcept { return _arch == kArchRISCV32; }
inline bool isArchRISCV64() const noexcept { return _arch == kArchRISCV64; }
inline bool isArchARM() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchARM; }
inline bool isArchThumb() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchThumb; }
inline bool isArchAArch64() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchAArch64; }
inline bool isArchMIPS32() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchMIPS32_LE; }
inline bool isArchMIPS64() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchMIPS64_LE; }
//! Tests whether the architecture is 32-bit.
inline bool is32Bit() const noexcept { return is32Bit(_arch); }
//! Tests whether the architecture is 64-bit.
inline bool is64Bit() const noexcept { return is64Bit(_arch); }
//! Tests whether the architecture is little endian.
inline bool isLittleEndian() const noexcept { return isLittleEndian(_arch); }
//! Tests whether the architecture is big endian.
inline bool isBigEndian() const noexcept { return isBigEndian(_arch); }
//! Tests whether this architecture is of X86 family.
inline bool isFamilyX86() const noexcept { return isFamilyX86(_arch); }
//! Tests whether this architecture family is RISC-V (both 32-bit and 64-bit).
inline bool isFamilyRISCV() const noexcept { return isFamilyRISCV(_arch); }
//! Tests whether this architecture family is ARM, Thumb, or AArch64.
inline bool isFamilyARM() const noexcept { return isFamilyARM(_arch); }
//! Tests whether this architecture family is MIPS or MIPS64.
inline bool isFamilyMIPS() const noexcept { return isFamilyMIPS(_arch); }
//! Tests whether the environment platform is Windows.
inline bool isPlatformWindows() const noexcept { return _platform == kPlatformWindows; }
//! Tests whether the environment platform is Linux.
inline bool isPlatformLinux() const noexcept { return _platform == kPlatformLinux; }
//! Tests whether the environment platform is Hurd.
inline bool isPlatformHurd() const noexcept { return _platform == kPlatformHurd; }
//! Tests whether the environment platform is Haiku.
inline bool isPlatformHaiku() const noexcept { return _platform == kPlatformHaiku; }
//! Tests whether the environment platform is any BSD.
inline bool isPlatformBSD() const noexcept {
return _platform == kPlatformFreeBSD ||
_platform == kPlatformOpenBSD ||
_platform == kPlatformNetBSD ||
_platform == kPlatformDragonFlyBSD;
}
//! Tests whether the environment platform is any Apple platform (OSX, iOS, TVOS, WatchOS).
inline bool isPlatformApple() const noexcept {
return _platform == kPlatformOSX ||
_platform == kPlatformIOS ||
_platform == kPlatformTVOS ||
_platform == kPlatformWatchOS;
}
//! Tests whether the ABI is MSVC.
inline bool isAbiMSVC() const noexcept { return _abi == kAbiMSVC; }
//! Tests whether the ABI is GNU.
inline bool isAbiGNU() const noexcept { return _abi == kAbiGNU; }
//! Returns a calculated stack alignment for this environment.
ASMJIT_API uint32_t stackAlignment() const noexcept;
//! Returns a native register size of this architecture.
uint32_t registerSize() const noexcept { return registerSizeFromArch(_arch); }
//! Sets the architecture to `arch`.
inline void setArch(uint32_t arch) noexcept { _arch = uint8_t(arch); }
//! Sets the sub-architecture to `subArch`.
inline void setSubArch(uint32_t subArch) noexcept { _subArch = uint8_t(subArch); }
//! Sets the vendor to `vendor`.
inline void setVendor(uint32_t vendor) noexcept { _vendor = uint8_t(vendor); }
//! Sets the platform to `platform`.
inline void setPlatform(uint32_t platform) noexcept { _platform = uint8_t(platform); }
//! Sets the ABI to `abi`.
inline void setAbi(uint32_t abi) noexcept { _abi = uint8_t(abi); }
//! Sets the object format to `format`.
inline void setFormat(uint32_t format) noexcept { _format = uint8_t(format); }
//! \}
//! \name Static Utilities
//! \{
static inline bool isValidArch(uint32_t arch) noexcept {
return (arch & ~kArchBigEndianMask) != 0 &&
(arch & ~kArchBigEndianMask) < kArchCount;
}
//! Tests whether the given architecture `arch` is 32-bit.
static inline bool is32Bit(uint32_t arch) noexcept {
return (arch & kArch32BitMask) == kArch32BitMask;
}
//! Tests whether the given architecture `arch` is 64-bit.
static inline bool is64Bit(uint32_t arch) noexcept {
return (arch & kArch32BitMask) == 0;
}
//! Tests whether the given architecture `arch` is little endian.
static inline bool isLittleEndian(uint32_t arch) noexcept {
return (arch & kArchBigEndianMask) == 0;
}
//! Tests whether the given architecture `arch` is big endian.
static inline bool isBigEndian(uint32_t arch) noexcept {
return (arch & kArchBigEndianMask) == kArchBigEndianMask;
}
//! Tests whether the given architecture is AArch64.
static inline bool isArchAArch64(uint32_t arch) noexcept {
arch &= ~kArchBigEndianMask;
return arch == kArchAArch64;
}
//! Tests whether the given architecture family is X86 or X64.
static inline bool isFamilyX86(uint32_t arch) noexcept {
return arch == kArchX86 ||
arch == kArchX64;
}
//! Tests whether the given architecture family is RISC-V (both 32-bit and 64-bit).
static inline bool isFamilyRISCV(uint32_t arch) noexcept {
return arch == kArchRISCV32 ||
arch == kArchRISCV64;
}
//! Tests whether the given architecture family is ARM, Thumb, or AArch64.
static inline bool isFamilyARM(uint32_t arch) noexcept {
arch &= ~kArchBigEndianMask;
return arch == kArchARM ||
arch == kArchAArch64 ||
arch == kArchThumb;
}
//! Tests whether the given architecture family is MIPS or MIPS64.
static inline bool isFamilyMIPS(uint32_t arch) noexcept {
arch &= ~kArchBigEndianMask;
return arch == kArchMIPS32_LE ||
arch == kArchMIPS64_LE;
}
//! Returns a native general purpose register size from the given architecture.
static uint32_t registerSizeFromArch(uint32_t arch) noexcept {
return is32Bit(arch) ? 4u : 8u;
}
//! \}
};
//! Returns the host environment constructed from preprocessor macros defined
//! by the compiler.
//!
//! The returned environment should precisely match the target host architecture,
//! sub-architecture, platform, and ABI.
static ASMJIT_INLINE Environment hostEnvironment() noexcept {
return Environment(Environment::kArchHost,
Environment::kSubArchHost,
Environment::kVendorHost,
Environment::kPlatformHost,
Environment::kAbiHost,
Environment::kFormatUnknown);
}
static_assert(sizeof(Environment) == 8,
"Environment must occupy exactly 8 bytes.");
//! \}
#ifndef ASMJIT_NO_DEPRECATED
class ASMJIT_DEPRECATED_STRUCT("Use Environment instead") ArchInfo : public Environment {
public:
inline ArchInfo() noexcept : Environment() {}
inline ArchInfo(const Environment& other) noexcept : Environment(other) {}
inline explicit ArchInfo(uint32_t arch, uint32_t subArch = kSubArchUnknown) noexcept
: Environment(arch, subArch) {}
enum Id : uint32_t {
kIdNone = Environment::kArchUnknown,
kIdX86 = Environment::kArchX86,
kIdX64 = Environment::kArchX64,
kIdA32 = Environment::kArchARM,
kIdA64 = Environment::kArchAArch64,
kIdHost = Environment::kArchHost
};
enum SubType : uint32_t {
kSubIdNone = Environment::kSubArchUnknown
};
static inline ArchInfo host() noexcept { return ArchInfo(hostEnvironment()); }
};
#endif // !ASMJIT_NO_DEPRECATED
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ENVIRONMENT_H_INCLUDED

View File

@ -0,0 +1,37 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/errorhandler.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ErrorHandler]
// ============================================================================
ErrorHandler::ErrorHandler() noexcept {}
ErrorHandler::~ErrorHandler() noexcept {}
ASMJIT_END_NAMESPACE

View File

@ -0,0 +1,267 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
#define ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
#include "../core/globals.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_error_handling
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
class BaseEmitter;
// ============================================================================
// [asmjit::ErrorHandler]
// ============================================================================
//! Error handler can be used to override the default behavior of error handling.
//!
//! It's available to all classes that inherit `BaseEmitter`. Override
//! \ref ErrorHandler::handleError() to implement your own error handler.
//!
//! The following use-cases are supported:
//!
//! - Record the error and continue code generation. This is the simplest
//! approach that can be used to at least log possible errors.
//! - Throw an exception. AsmJit doesn't use exceptions and is completely
//! exception-safe, but it's perfectly legal to throw an exception from
//! the error handler.
//! - Use plain old C's `setjmp()` and `longjmp()`. AsmJit always puts Assembler,
//! Builder and Compiler to a consistent state before calling \ref handleError(),
//! so `longjmp()` can be used without issues to cancel the code-generation if
//! an error occurred. This method can be used if exception handling in your
//! project is turned off and you still want some comfort. In most cases it
//! should be safe as AsmJit uses \ref Zone memory and the ownership of memory
//! it allocates always ends with the instance that allocated it. If using this
//! approach please never jump outside the life-time of \ref CodeHolder and
//! \ref BaseEmitter.
//!
//! \ref ErrorHandler can be attached to \ref CodeHolder or \ref BaseEmitter,
//! which has a priority. The example below uses error handler that just prints
//! the error, but lets AsmJit continue:
//!
//! ```
//! // Error Handling #1 - Logging and returning Error.
//! #include <asmjit/x86.h>
//! #include <stdio.h>
//!
//! using namespace asmjit;
//!
//! // Error handler that just prints the error and lets AsmJit ignore it.
//! class SimpleErrorHandler : public ErrorHandler {
//! public:
//! Error err;
//!
//! inline SimpleErrorHandler() : err(kErrorOk) {}
//!
//! void handleError(Error err, const char* message, BaseEmitter* origin) override {
//! this->err = err;
//! fprintf(stderr, "ERROR: %s\n", message);
//! }
//! };
//!
//! int main() {
//! JitRuntime rt;
//! SimpleErrorHandler eh;
//!
//! CodeHolder code;
//! code.init(rt.environment());
//! code.setErrorHandler(&eh);
//!
//! // Try to emit instruction that doesn't exist.
//! x86::Assembler a(&code);
//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
//!
//! if (eh.err) {
//! // Assembler failed!
//! return 1;
//! }
//!
//! return 0;
//! }
//! ```
//!
//! If error happens during instruction emitting / encoding the assembler behaves
//! transactionally - the output buffer won't advance if encoding failed, thus
//! either a fully encoded instruction or nothing is emitted. The error handling
//! shown above is useful, but it's still not the best way of dealing with errors
//! in AsmJit. The following example shows how to use exception handling to handle
//! errors in a more C++ way:
//!
//! ```
//! // Error Handling #2 - Throwing an exception.
//! #include <asmjit/x86.h>
//! #include <exception>
//! #include <string>
//! #include <stdio.h>
//!
//! using namespace asmjit;
//!
//! // Error handler that throws a user-defined `AsmJitException`.
//! class AsmJitException : public std::exception {
//! public:
//! Error err;
//! std::string message;
//!
//! AsmJitException(Error err, const char* message) noexcept
//! : err(err),
//! message(message) {}
//!
//! const char* what() const noexcept override { return message.c_str(); }
//! };
//!
//! class ThrowableErrorHandler : public ErrorHandler {
//! public:
//! // Throwing is possible because functions that use ErrorHandler are never 'noexcept'.
//! void handleError(Error err, const char* message, BaseEmitter* origin) override {
//! throw AsmJitException(err, message);
//! }
//! };
//!
//! int main() {
//! JitRuntime rt;
//! ThrowableErrorHandler eh;
//!
//! CodeHolder code;
//! code.init(rt.environment());
//! code.setErrorHandler(&eh);
//!
//! x86::Assembler a(&code);
//!
//! // Try to emit instruction that doesn't exist.
//! try {
//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
//! }
//! catch (const AsmJitException& ex) {
//! printf("EXCEPTION THROWN: %s\n", ex.what());
//! return 1;
//! }
//!
//! return 0;
//! }
//! ```
//!
//! If C++ exceptions are not what you like or your project turns off them
//! completely there is still a way of reducing the error handling to a minimum
//! by using a standard setjmp/longjmp approach. AsmJit is exception-safe and
//! cleans up everything before calling the ErrorHandler, so any approach is
//! safe. You can simply jump from the error handler without causing any
//! side-effects or memory leaks. The following example demonstrates how it
//! could be done:
//!
//! ```
//! // Error Handling #3 - Using setjmp/longjmp if exceptions are not allowed.
//! #include <asmjit/x86.h>
//! #include <setjmp.h>
//! #include <stdio.h>
//!
//! class LongJmpErrorHandler : public asmjit::ErrorHandler {
//! public:
//! inline LongJmpErrorHandler() : err(asmjit::kErrorOk) {}
//!
//! void handleError(asmjit::Error err, const char* message, asmjit::BaseEmitter* origin) override {
//! this->err = err;
//! longjmp(state, 1);
//! }
//!
//! jmp_buf state;
//! asmjit::Error err;
//! };
//!
//! int main(int argc, char* argv[]) {
//! using namespace asmjit;
//!
//! JitRuntime rt;
//! LongJmpErrorHandler eh;
//!
//! CodeHolder code;
//! code.init(rt.environment());
//! code.setErrorHandler(&eh);
//!
//! x86::Assembler a(&code);
//!
//! if (!setjmp(eh.state)) {
//! // Try to emit instruction that doesn't exist.
//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
//! }
//! else {
//! Error err = eh.err;
//! printf("ASMJIT ERROR: 0x%08X [%s]\n", err, DebugUtils::errorAsString(err));
//! }
//!
//! return 0;
//! }
//! ```
class ASMJIT_VIRTAPI ErrorHandler {
public:
ASMJIT_BASE_CLASS(ErrorHandler)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Creates a new `ErrorHandler` instance.
ASMJIT_API ErrorHandler() noexcept;
//! Destroys the `ErrorHandler` instance.
ASMJIT_API virtual ~ErrorHandler() noexcept;
// --------------------------------------------------------------------------
// [Handle Error]
// --------------------------------------------------------------------------
//! Error handler (must be reimplemented).
//!
//! Error handler is called after an error happened and before it's propagated
//! to the caller. There are multiple ways how the error handler can be used:
//!
//! 1. User-based error handling without throwing exception or using C's
//! `longjmp()`. This is for users that don't use exceptions and want
//! customized error handling.
//!
//! 2. Throwing an exception. AsmJit doesn't use exceptions and is completely
//! exception-safe, but you can throw exception from your error handler if
//! this way is the preferred way of handling errors in your project.
//!
//! 3. Using plain old C's `setjmp()` and `longjmp()`. AsmJit always puts
//! `BaseEmitter` to a consistent state before calling `handleError()`
//! so `longjmp()` can be used without any issues to cancel the code
//! generation if an error occurred. There is no difference between
//! exceptions and `longjmp()` from AsmJit's perspective, however,
//! never jump outside of `CodeHolder` and `BaseEmitter` scope as you
//! would leak memory.
virtual void handleError(Error err, const char* message, BaseEmitter* origin) = 0;
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ERRORHANDLER_H_INCLUDED

186
deps/asmjit/src/asmjit/core/features.h vendored Normal file
View File

@ -0,0 +1,186 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_FEATURES_H_INCLUDED
#define ASMJIT_CORE_FEATURES_H_INCLUDED
#include "../core/globals.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::BaseFeatures]
// ============================================================================
//! Base class that provides information about CPU features.
//!
//! Internally each feature is represented by a single bit in an embedded
//! bit-array, however, feature bits are defined by an architecture specific
//! implementations, like \ref x86::Features.
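//!
//! A minimal sketch of adding, testing, and iterating feature bits (the
//! `kSomeFeatureId` constant is a hypothetical placeholder for an
//! architecture-specific id such as one defined by \ref x86::Features):
//!
//! ```
//! const uint32_t kSomeFeatureId = 1; // Placeholder feature id.
//!
//! BaseFeatures features;
//! features.add(kSomeFeatureId);
//! bool present = features.has(kSomeFeatureId); // true
//!
//! // Iterate over all feature ids that are set.
//! BaseFeatures::Iterator it = features.iterator();
//! while (it.hasNext()) {
//! uint32_t featureId = uint32_t(it.next());
//! // ... use featureId ...
//! }
//! ```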
class BaseFeatures {
public:
typedef Support::BitWord BitWord;
typedef Support::BitVectorIterator<BitWord> Iterator;
enum : uint32_t {
kMaxFeatures = 128,
kNumBitWords = kMaxFeatures / Support::kBitWordSizeInBits
};
BitWord _bits[kNumBitWords];
//! \name Construction & Destruction
//! \{
inline BaseFeatures() noexcept { reset(); }
inline BaseFeatures(const BaseFeatures& other) noexcept = default;
inline explicit BaseFeatures(Globals::NoInit_) noexcept {}
inline void reset() noexcept {
for (size_t i = 0; i < kNumBitWords; i++)
_bits[i] = 0;
}
//! \}
//! \name Overloaded Operators
//! \{
inline BaseFeatures& operator=(const BaseFeatures& other) noexcept = default;
inline bool operator==(const BaseFeatures& other) noexcept { return eq(other); }
inline bool operator!=(const BaseFeatures& other) noexcept { return !eq(other); }
//! \}
//! \name Cast
//! \{
//! Casts this base class into a derived type `T`.
template<typename T>
inline T& as() noexcept { return static_cast<T&>(*this); }
//! Casts this base class into a derived type `T` (const).
template<typename T>
inline const T& as() const noexcept { return static_cast<const T&>(*this); }
//! \}
//! \name Accessors
//! \{
inline bool empty() const noexcept {
for (uint32_t i = 0; i < kNumBitWords; i++)
if (_bits[i])
return false;
return true;
}
//! Returns all features as array of bitwords (see \ref Support::BitWord).
inline BitWord* bits() noexcept { return _bits; }
//! Returns all features as array of bitwords (const).
inline const BitWord* bits() const noexcept { return _bits; }
//! Returns the number of BitWords returned by \ref bits().
inline size_t bitWordCount() const noexcept { return kNumBitWords; }
//! Returns a \ref Support::BitVectorIterator that can be used to iterate over
//! all features efficiently.
inline Iterator iterator() const noexcept {
return Iterator(_bits, kNumBitWords);
}
//! Tests whether the feature `featureId` is present.
inline bool has(uint32_t featureId) const noexcept {
ASMJIT_ASSERT(featureId < kMaxFeatures);
uint32_t idx = featureId / Support::kBitWordSizeInBits;
uint32_t bit = featureId % Support::kBitWordSizeInBits;
return bool((_bits[idx] >> bit) & 0x1);
}
//! Tests whether all features as defined by `other` are present.
inline bool hasAll(const BaseFeatures& other) const noexcept {
for (uint32_t i = 0; i < kNumBitWords; i++)
if ((_bits[i] & other._bits[i]) != other._bits[i])
return false;
return true;
}
//! \}
//! \name Utilities
//! \{
//! Adds the given CPU `featureId` to the list of features.
inline void add(uint32_t featureId) noexcept {
ASMJIT_ASSERT(featureId < kMaxFeatures);
uint32_t idx = featureId / Support::kBitWordSizeInBits;
uint32_t bit = featureId % Support::kBitWordSizeInBits;
_bits[idx] |= BitWord(1) << bit;
}
template<typename... Args>
inline void add(uint32_t featureId, Args... otherIds) noexcept {
add(featureId);
add(otherIds...);
}
//! Removes the given CPU `featureId` from the list of features.
inline void remove(uint32_t featureId) noexcept {
ASMJIT_ASSERT(featureId < kMaxFeatures);
uint32_t idx = featureId / Support::kBitWordSizeInBits;
uint32_t bit = featureId % Support::kBitWordSizeInBits;
_bits[idx] &= ~(BitWord(1) << bit);
}
template<typename... Args>
inline void remove(uint32_t featureId, Args... otherIds) noexcept {
remove(featureId);
remove(otherIds...);
}
inline bool eq(const BaseFeatures& other) const noexcept {
for (size_t i = 0; i < kNumBitWords; i++)
if (_bits[i] != other._bits[i])
return false;
return true;
}
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_FEATURES_H_INCLUDED

View File

@ -0,0 +1,481 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/builder.h"
#include "../core/codeholder.h"
#include "../core/compiler.h"
#include "../core/emitter.h"
#include "../core/formatter.h"
#include "../core/string.h"
#include "../core/support.h"
#include "../core/type.h"
#ifdef ASMJIT_BUILD_X86
#include "../x86/x86formatter_p.h"
#endif
#ifdef ASMJIT_BUILD_ARM
#include "../arm/armformatter_p.h"
#endif
ASMJIT_BEGIN_NAMESPACE
#if defined(ASMJIT_NO_COMPILER)
class VirtReg;
#endif
// ============================================================================
// [asmjit::Formatter]
// ============================================================================
namespace Formatter {
Error formatTypeId(String& sb, uint32_t typeId) noexcept {
if (typeId == Type::kIdVoid)
return sb.append("void");
if (!Type::isValid(typeId))
return sb.append("unknown");
const char* typeName = "unknown";
uint32_t typeSize = Type::sizeOf(typeId);
uint32_t baseId = Type::baseOf(typeId);
switch (baseId) {
case Type::kIdIntPtr : typeName = "iptr" ; break;
case Type::kIdUIntPtr: typeName = "uptr" ; break;
case Type::kIdI8 : typeName = "i8" ; break;
case Type::kIdU8 : typeName = "u8" ; break;
case Type::kIdI16 : typeName = "i16" ; break;
case Type::kIdU16 : typeName = "u16" ; break;
case Type::kIdI32 : typeName = "i32" ; break;
case Type::kIdU32 : typeName = "u32" ; break;
case Type::kIdI64 : typeName = "i64" ; break;
case Type::kIdU64 : typeName = "u64" ; break;
case Type::kIdF32 : typeName = "f32" ; break;
case Type::kIdF64 : typeName = "f64" ; break;
case Type::kIdF80 : typeName = "f80" ; break;
case Type::kIdMask8 : typeName = "mask8" ; break;
case Type::kIdMask16 : typeName = "mask16"; break;
case Type::kIdMask32 : typeName = "mask32"; break;
case Type::kIdMask64 : typeName = "mask64"; break;
case Type::kIdMmx32 : typeName = "mmx32" ; break;
case Type::kIdMmx64 : typeName = "mmx64" ; break;
}
uint32_t baseSize = Type::sizeOf(baseId);
if (typeSize > baseSize) {
uint32_t count = typeSize / baseSize;
return sb.appendFormat("%sx%u", typeName, unsigned(count));
}
else {
return sb.append(typeName);
}
}
Error formatFeature(
String& sb,
uint32_t arch,
uint32_t featureId) noexcept {
#ifdef ASMJIT_BUILD_X86
if (Environment::isFamilyX86(arch))
return x86::FormatterInternal::formatFeature(sb, featureId);
#endif
#ifdef ASMJIT_BUILD_ARM
if (Environment::isFamilyARM(arch))
return arm::FormatterInternal::formatFeature(sb, featureId);
#endif
return kErrorInvalidArch;
}
Error formatLabel(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t labelId) noexcept {
DebugUtils::unused(formatFlags);
const LabelEntry* le = emitter->code()->labelEntry(labelId);
if (ASMJIT_UNLIKELY(!le))
return sb.appendFormat("<InvalidLabel:%u>", labelId);
if (le->hasName()) {
if (le->hasParent()) {
uint32_t parentId = le->parentId();
const LabelEntry* pe = emitter->code()->labelEntry(parentId);
if (ASMJIT_UNLIKELY(!pe))
ASMJIT_PROPAGATE(sb.appendFormat("<InvalidLabel:%u>", labelId));
else if (ASMJIT_UNLIKELY(!pe->hasName()))
ASMJIT_PROPAGATE(sb.appendFormat("L%u", parentId));
else
ASMJIT_PROPAGATE(sb.append(pe->name()));
ASMJIT_PROPAGATE(sb.append('.'));
}
return sb.append(le->name());
}
else {
return sb.appendFormat("L%u", labelId);
}
}
Error formatRegister(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
uint32_t regType,
uint32_t regId) noexcept {
#ifdef ASMJIT_BUILD_X86
if (Environment::isFamilyX86(arch))
return x86::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
#endif
#ifdef ASMJIT_BUILD_ARM
if (Environment::isFamilyARM(arch))
return arm::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
#endif
return kErrorInvalidArch;
}
Error formatOperand(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
const Operand_& op) noexcept {
#ifdef ASMJIT_BUILD_X86
if (Environment::isFamilyX86(arch))
return x86::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
#endif
#ifdef ASMJIT_BUILD_ARM
if (Environment::isFamilyARM(arch))
return arm::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
#endif
return kErrorInvalidArch;
}
Error formatInstruction(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept {
#ifdef ASMJIT_BUILD_X86
if (Environment::isFamilyX86(arch))
return x86::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
#endif
#ifdef ASMJIT_BUILD_ARM
if (Environment::isFamilyARM(arch))
return arm::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
#endif
return kErrorInvalidArch;
}
#ifndef ASMJIT_NO_BUILDER
#ifndef ASMJIT_NO_COMPILER
static Error formatFuncValue(String& sb, uint32_t formatFlags, const BaseEmitter* emitter, FuncValue value) noexcept {
uint32_t typeId = value.typeId();
ASMJIT_PROPAGATE(formatTypeId(sb, typeId));
if (value.isAssigned()) {
ASMJIT_PROPAGATE(sb.append('@'));
if (value.isIndirect())
ASMJIT_PROPAGATE(sb.append('['));
// NOTE: It should be either reg or stack, but never both. We
// use two IFs on purpose so if the FuncValue is both it would
// show up in logs.
if (value.isReg()) {
ASMJIT_PROPAGATE(formatRegister(sb, formatFlags, emitter, emitter->arch(), value.regType(), value.regId()));
}
if (value.isStack()) {
ASMJIT_PROPAGATE(sb.appendFormat("[%d]", int(value.stackOffset())));
}
if (value.isIndirect())
ASMJIT_PROPAGATE(sb.append(']'));
}
return kErrorOk;
}
static Error formatFuncValuePack(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
const FuncValuePack& pack,
VirtReg* const* vRegs) noexcept {
size_t count = pack.count();
if (!count)
return sb.append("void");
if (count > 1)
sb.append('[');
for (uint32_t valueIndex = 0; valueIndex < count; valueIndex++) {
const FuncValue& value = pack[valueIndex];
if (!value)
break;
if (valueIndex)
ASMJIT_PROPAGATE(sb.append(", "));
ASMJIT_PROPAGATE(formatFuncValue(sb, formatFlags, emitter, value));
if (vRegs) {
static const char nullRet[] = "<none>";
ASMJIT_PROPAGATE(sb.appendFormat(" %s", vRegs[valueIndex] ? vRegs[valueIndex]->name() : nullRet));
}
}
if (count > 1)
sb.append(']');
return kErrorOk;
}
static Error formatFuncRets(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
const FuncDetail& fd) noexcept {
return formatFuncValuePack(sb, formatFlags, emitter, fd.retPack(), nullptr);
}
static Error formatFuncArgs(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
const FuncDetail& fd,
const FuncNode::ArgPack* argPacks) noexcept {
uint32_t argCount = fd.argCount();
if (!argCount)
return sb.append("void");
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
if (argIndex)
ASMJIT_PROPAGATE(sb.append(", "));
ASMJIT_PROPAGATE(formatFuncValuePack(sb, formatFlags, emitter, fd.argPack(argIndex), argPacks[argIndex]._data));
}
return kErrorOk;
}
#endif
Error formatNode(
String& sb,
uint32_t formatFlags,
const BaseBuilder* builder,
const BaseNode* node) noexcept {
if (node->hasPosition() && (formatFlags & FormatOptions::kFlagPositions) != 0)
ASMJIT_PROPAGATE(sb.appendFormat("<%05u> ", node->position()));
switch (node->type()) {
case BaseNode::kNodeInst:
case BaseNode::kNodeJump: {
const InstNode* instNode = node->as<InstNode>();
ASMJIT_PROPAGATE(
formatInstruction(sb, formatFlags, builder,
builder->arch(),
instNode->baseInst(), instNode->operands(), instNode->opCount()));
break;
}
case BaseNode::kNodeSection: {
const SectionNode* sectionNode = node->as<SectionNode>();
if (builder->_code->isSectionValid(sectionNode->id())) {
const Section* section = builder->_code->sectionById(sectionNode->id());
ASMJIT_PROPAGATE(sb.appendFormat(".section %s", section->name()));
}
break;
}
case BaseNode::kNodeLabel: {
const LabelNode* labelNode = node->as<LabelNode>();
ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, labelNode->labelId()));
ASMJIT_PROPAGATE(sb.append(":"));
break;
}
case BaseNode::kNodeAlign: {
const AlignNode* alignNode = node->as<AlignNode>();
ASMJIT_PROPAGATE(
sb.appendFormat("align %u (%s)",
alignNode->alignment(),
alignNode->alignMode() == kAlignCode ? "code" : "data"));
break;
}
case BaseNode::kNodeEmbedData: {
const EmbedDataNode* embedNode = node->as<EmbedDataNode>();
ASMJIT_PROPAGATE(sb.append("embed "));
if (embedNode->repeatCount() != 1)
ASMJIT_PROPAGATE(sb.appendFormat("[repeat=%zu] ", size_t(embedNode->repeatCount())));
ASMJIT_PROPAGATE(sb.appendFormat("%u bytes", embedNode->dataSize()));
break;
}
case BaseNode::kNodeEmbedLabel: {
const EmbedLabelNode* embedNode = node->as<EmbedLabelNode>();
ASMJIT_PROPAGATE(sb.append(".label "));
ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, embedNode->labelId()));
break;
}
case BaseNode::kNodeEmbedLabelDelta: {
const EmbedLabelDeltaNode* embedNode = node->as<EmbedLabelDeltaNode>();
ASMJIT_PROPAGATE(sb.append(".label ("));
ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, embedNode->labelId()));
ASMJIT_PROPAGATE(sb.append(" - "));
ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, embedNode->baseLabelId()));
ASMJIT_PROPAGATE(sb.append(")"));
break;
}
case BaseNode::kNodeComment: {
const CommentNode* commentNode = node->as<CommentNode>();
ASMJIT_PROPAGATE(sb.appendFormat("; %s", commentNode->inlineComment()));
break;
}
case BaseNode::kNodeSentinel: {
const SentinelNode* sentinelNode = node->as<SentinelNode>();
const char* sentinelName = nullptr;
switch (sentinelNode->sentinelType()) {
case SentinelNode::kSentinelFuncEnd:
sentinelName = "[FuncEnd]";
break;
default:
sentinelName = "[Sentinel]";
break;
}
ASMJIT_PROPAGATE(sb.append(sentinelName));
break;
}
#ifndef ASMJIT_NO_COMPILER
case BaseNode::kNodeFunc: {
const FuncNode* funcNode = node->as<FuncNode>();
ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, funcNode->labelId()));
ASMJIT_PROPAGATE(sb.append(": "));
ASMJIT_PROPAGATE(formatFuncRets(sb, formatFlags, builder, funcNode->detail()));
ASMJIT_PROPAGATE(sb.append(" Func("));
ASMJIT_PROPAGATE(formatFuncArgs(sb, formatFlags, builder, funcNode->detail(), funcNode->argPacks()));
ASMJIT_PROPAGATE(sb.append(")"));
break;
}
case BaseNode::kNodeFuncRet: {
const FuncRetNode* retNode = node->as<FuncRetNode>();
ASMJIT_PROPAGATE(sb.append("[FuncRet]"));
for (uint32_t i = 0; i < 2; i++) {
const Operand_& op = retNode->_opArray[i];
if (!op.isNone()) {
ASMJIT_PROPAGATE(sb.append(i == 0 ? " " : ", "));
ASMJIT_PROPAGATE(formatOperand(sb, formatFlags, builder, builder->arch(), op));
}
}
break;
}
case BaseNode::kNodeInvoke: {
const InvokeNode* invokeNode = node->as<InvokeNode>();
ASMJIT_PROPAGATE(
formatInstruction(sb, formatFlags, builder,
builder->arch(),
invokeNode->baseInst(), invokeNode->operands(), invokeNode->opCount()));
break;
}
#endif
default: {
ASMJIT_PROPAGATE(sb.appendFormat("[UserNode:%u]", node->type()));
break;
}
}
return kErrorOk;
}
Error formatNodeList(
String& sb,
uint32_t formatFlags,
const BaseBuilder* builder) noexcept {
return formatNodeList(sb, formatFlags, builder, builder->firstNode(), nullptr);
}
Error formatNodeList(
String& sb,
uint32_t formatFlags,
const BaseBuilder* builder,
const BaseNode* begin,
const BaseNode* end) noexcept {
const BaseNode* node = begin;
while (node != end) {
ASMJIT_PROPAGATE(formatNode(sb, formatFlags, builder, node));
ASMJIT_PROPAGATE(sb.append('\n'));
node = node->next();
}
return kErrorOk;
}
#endif
} // {Formatter}
ASMJIT_END_NAMESPACE
#endif
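
A minimal usage sketch (not part of this commit) of the node-list formatting entry points defined above; it assumes an x86 build with the Builder and logging features enabled, and the emitted instructions are arbitrary choices made only for illustration:

#include <asmjit/x86.h>
#include <cstdio>

using namespace asmjit;

int main() {
  JitRuntime rt;                          // host environment (assumed x86_64 JIT build)
  CodeHolder code;
  code.init(rt.environment());

  x86::Builder cb(&code);                 // requires !ASMJIT_NO_BUILDER
  cb.mov(x86::eax, 1);
  cb.ret();

  // Formats every node owned by the builder, one node per line.
  String sb;
  Formatter::formatNodeList(sb, FormatOptions::kNoFlags, &cb);
  printf("%s", sb.data());                // e.g. "mov eax, 1" and "ret"
  return 0;
}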

256
deps/asmjit/src/asmjit/core/formatter.h vendored Normal file
View File

@ -0,0 +1,256 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_FORMATTER_H_INCLUDED
#define ASMJIT_CORE_FORMATTER_H_INCLUDED
#include "../core/inst.h"
#include "../core/string.h"
#ifndef ASMJIT_NO_LOGGING
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_logging
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
class BaseEmitter;
struct Operand_;
#ifndef ASMJIT_NO_BUILDER
class BaseBuilder;
class BaseNode;
#endif
#ifndef ASMJIT_NO_COMPILER
class BaseCompiler;
#endif
// ============================================================================
// [asmjit::FormatOptions]
// ============================================================================
//! Formatting options used by \ref Logger and \ref Formatter.
class FormatOptions {
public:
//! Format flags, see \ref Flags.
uint32_t _flags;
//! Indentation by type, see \ref IndentationType.
uint8_t _indentation[4];
//! Flags can enable a logging feature.
enum Flags : uint32_t {
//! No flags.
kNoFlags = 0u,
//! Show also binary form of each logged instruction (Assembler).
kFlagMachineCode = 0x00000001u,
//! Show a text explanation of some immediate values.
kFlagExplainImms = 0x00000002u,
//! Use hexadecimal notation of immediate values.
kFlagHexImms = 0x00000004u,
//! Use hexadecimal notation of address offsets.
kFlagHexOffsets = 0x00000008u,
//! Show casts between virtual register types (Compiler).
kFlagRegCasts = 0x00000010u,
//! Show positions associated with nodes (Compiler).
kFlagPositions = 0x00000020u,
//! Annotate nodes that are lowered by passes.
kFlagAnnotations = 0x00000040u,
// TODO: These must go, keep this only for formatting.
//! Show an additional output from passes.
kFlagDebugPasses = 0x00000080u,
//! Show an additional output from RA.
kFlagDebugRA = 0x00000100u
};
//! Describes indentation type of code, label, or comment in logger output.
enum IndentationType : uint32_t {
//! Indentation used for instructions and directives.
kIndentationCode = 0u,
//! Indentation used for labels and function nodes.
kIndentationLabel = 1u,
//! Indentation used for comments (not inline comments).
kIndentationComment = 2u,
//! \cond INTERNAL
//! Reserved for future use.
kIndentationReserved = 3u
//! \endcond
};
//! \name Construction & Destruction
//! \{
//! Creates a default-initialized FormatOptions.
constexpr FormatOptions() noexcept
: _flags(0),
_indentation { 0, 0, 0, 0 } {}
constexpr FormatOptions(const FormatOptions& other) noexcept = default;
inline FormatOptions& operator=(const FormatOptions& other) noexcept = default;
//! Resets FormatOptions to its default initialized state.
inline void reset() noexcept {
_flags = 0;
_indentation[0] = 0;
_indentation[1] = 0;
_indentation[2] = 0;
_indentation[3] = 0;
}
//! \}
//! \name Accessors
//! \{
//! Returns format flags.
constexpr uint32_t flags() const noexcept { return _flags; }
//! Tests whether the given `flag` is set in format flags.
constexpr bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
//! Resets all format flags to `flags`.
inline void setFlags(uint32_t flags) noexcept { _flags = flags; }
//! Adds `flags` to format flags.
inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
//! Removes `flags` from format flags.
inline void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; }
//! Returns indentation for the given `type`, see \ref IndentationType.
constexpr uint8_t indentation(uint32_t type) const noexcept { return _indentation[type]; }
//! Sets indentation for the given `type`, see \ref IndentationType.
inline void setIndentation(uint32_t type, uint32_t n) noexcept { _indentation[type] = uint8_t(n); }
//! Resets indentation for the given `type` to zero.
inline void resetIndentation(uint32_t type) noexcept { _indentation[type] = uint8_t(0); }
//! \}
};
// ============================================================================
// [asmjit::Formatter]
// ============================================================================
//! Provides formatting functionality to format operands, instructions, and nodes.
namespace Formatter {
//! Appends a formatted `typeId` to the output string `sb`.
ASMJIT_API Error formatTypeId(
String& sb,
uint32_t typeId) noexcept;
//! Appends a formatted `featureId` to the output string `sb`.
//!
//! See \ref BaseFeatures.
ASMJIT_API Error formatFeature(
String& sb,
uint32_t arch,
uint32_t featureId) noexcept;
//! Appends a formatted register to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format virtual registers,
//! which won't be formatted properly if the `emitter` is not provided.
ASMJIT_API Error formatRegister(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
uint32_t regType,
uint32_t regId) noexcept;
//! Appends a formatted label to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels
//! properly; otherwise the label is formatted as if it were an anonymous label.
ASMJIT_API Error formatLabel(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t labelId) noexcept;
//! Appends a formatted operand to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels and
//! virtual registers. See \ref formatRegister() and \ref formatLabel() for
//! more details.
ASMJIT_API Error formatOperand(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
const Operand_& op) noexcept;
//! Appends a formatted instruction to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels and
//! virtual registers. See \ref formatRegister() and \ref formatLabel() for
//! more details.
ASMJIT_API Error formatInstruction(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept;
#ifndef ASMJIT_NO_BUILDER
//! Appends a formatted node to the output string `sb`.
//!
//! The `node` must belong to the provided `builder`.
ASMJIT_API Error formatNode(
String& sb,
uint32_t formatFlags,
const BaseBuilder* builder,
const BaseNode* node) noexcept;
//! Appends formatted nodes to the output string `sb`.
//!
//! All nodes that are part of the given `builder` will be appended.
ASMJIT_API Error formatNodeList(
String& sb,
uint32_t formatFlags,
const BaseBuilder* builder) noexcept;
//! Appends formatted nodes to the output string `sb`.
//!
//! This function works the same as \ref formatNode(), but appends more nodes
//! to the output string, separating each node with a newline '\n' character.
ASMJIT_API Error formatNodeList(
String& sb,
uint32_t formatFlags,
const BaseBuilder* builder,
const BaseNode* begin,
const BaseNode* end) noexcept;
#endif
} // {Formatter}
//! \}
ASMJIT_END_NAMESPACE
#endif
#endif // ASMJIT_CORE_FORMATTER_H_INCLUDED
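
As a rough illustration of the API declared in this header (a sketch only, assuming an x86 build with logging enabled; the instruction, operands, and flags are arbitrary choices):

#include <asmjit/x86.h>
#include <cstdio>

using namespace asmjit;

int main() {
  FormatOptions opts;
  opts.addFlags(FormatOptions::kFlagHexImms);   // print immediates as hex

  String sb;
  BaseInst inst(x86::Inst::kIdAdd);
  Operand ops[] = { x86::eax, Imm(16) };

  // No emitter is passed, so virtual registers and named labels would not be
  // resolved here (see the notes above).
  Formatter::formatInstruction(sb, opts.flags(), nullptr,
                               Environment::kArchX64, inst, ops, 2);
  printf("%s\n", sb.data());                    // e.g. "add eax, 0x10"
  return 0;
}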

310
deps/asmjit/src/asmjit/core/func.cpp vendored Normal file
View File

@ -0,0 +1,310 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
#include "../core/func.h"
#include "../core/operand.h"
#include "../core/type.h"
#include "../core/funcargscontext_p.h"
#ifdef ASMJIT_BUILD_X86
#include "../x86/x86func_p.h"
#endif
#ifdef ASMJIT_BUILD_ARM
#include "../arm/armfunc_p.h"
#endif
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::CallConv - Init / Reset]
// ============================================================================
ASMJIT_FAVOR_SIZE Error CallConv::init(uint32_t ccId, const Environment& environment) noexcept {
reset();
#ifdef ASMJIT_BUILD_X86
if (environment.isFamilyX86())
return x86::FuncInternal::initCallConv(*this, ccId, environment);
#endif
#ifdef ASMJIT_BUILD_ARM
if (environment.isFamilyARM())
return arm::FuncInternal::initCallConv(*this, ccId, environment);
#endif
return DebugUtils::errored(kErrorInvalidArgument);
}
// ============================================================================
// [asmjit::FuncDetail - Init / Reset]
// ============================================================================
ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const Environment& environment) noexcept {
uint32_t ccId = signature.callConv();
uint32_t argCount = signature.argCount();
if (ASMJIT_UNLIKELY(argCount > Globals::kMaxFuncArgs))
return DebugUtils::errored(kErrorInvalidArgument);
CallConv& cc = _callConv;
ASMJIT_PROPAGATE(cc.init(ccId, environment));
uint32_t registerSize = Environment::registerSizeFromArch(cc.arch());
uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize);
const uint8_t* signatureArgs = signature.args();
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
FuncValuePack& argPack = _args[argIndex];
argPack[0].initTypeId(Type::deabstract(signatureArgs[argIndex], deabstractDelta));
}
_argCount = uint8_t(argCount);
_vaIndex = uint8_t(signature.vaIndex());
uint32_t ret = signature.ret();
if (ret != Type::kIdVoid)
_rets[0].initTypeId(Type::deabstract(ret, deabstractDelta));
#ifdef ASMJIT_BUILD_X86
if (environment.isFamilyX86())
return x86::FuncInternal::initFuncDetail(*this, signature, registerSize);
#endif
#ifdef ASMJIT_BUILD_ARM
if (environment.isFamilyARM())
return arm::FuncInternal::initFuncDetail(*this, signature, registerSize);
#endif
// We should never get here: if `cc.init()` succeeded, there must be an
// implementation for the current architecture. However, stay safe.
return DebugUtils::errored(kErrorInvalidArgument);
}
// ============================================================================
// [asmjit::FuncFrame - Init / Finalize]
// ============================================================================
ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept {
uint32_t arch = func.callConv().arch();
if (!Environment::isValidArch(arch))
return DebugUtils::errored(kErrorInvalidArch);
const ArchTraits& archTraits = ArchTraits::byArch(arch);
// Initializing FuncFrame means making a copy of some properties of `func`.
// Properties like `_localStackSize` will be set by the user before the frame
// is finalized.
reset();
_arch = uint8_t(arch);
_spRegId = uint8_t(archTraits.spRegId());
_saRegId = uint8_t(BaseReg::kIdBad);
uint32_t naturalStackAlignment = func.callConv().naturalStackAlignment();
uint32_t minDynamicAlignment = Support::max<uint32_t>(naturalStackAlignment, 16);
if (minDynamicAlignment == naturalStackAlignment)
minDynamicAlignment <<= 1;
_naturalStackAlignment = uint8_t(naturalStackAlignment);
_minDynamicAlignment = uint8_t(minDynamicAlignment);
_redZoneSize = uint8_t(func.redZoneSize());
_spillZoneSize = uint8_t(func.spillZoneSize());
_finalStackAlignment = uint8_t(_naturalStackAlignment);
if (func.hasFlag(CallConv::kFlagCalleePopsStack)) {
_calleeStackCleanup = uint16_t(func.argStackSize());
}
// Initial masks of dirty and preserved registers.
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
_dirtyRegs[group] = func.usedRegs(group);
_preservedRegs[group] = func.preservedRegs(group);
}
// Exclude stack pointer - this register is never included in saved GP regs.
_preservedRegs[BaseReg::kGroupGp] &= ~Support::bitMask(archTraits.spRegId());
// The size and alignment of save/restore area of registers for each significant register group.
memcpy(_saveRestoreRegSize, func.callConv()._saveRestoreRegSize, sizeof(_saveRestoreRegSize));
memcpy(_saveRestoreAlignment, func.callConv()._saveRestoreAlignment, sizeof(_saveRestoreAlignment));
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
if (!Environment::isValidArch(arch()))
return DebugUtils::errored(kErrorInvalidArch);
const ArchTraits& archTraits = ArchTraits::byArch(arch());
uint32_t registerSize = _saveRestoreRegSize[BaseReg::kGroupGp];
uint32_t vectorSize = _saveRestoreRegSize[BaseReg::kGroupVec];
uint32_t returnAddressSize = archTraits.hasLinkReg() ? 0u : registerSize;
// The final stack alignment must be updated accordingly to call and local stack alignments.
uint32_t stackAlignment = _finalStackAlignment;
ASMJIT_ASSERT(stackAlignment == Support::max(_naturalStackAlignment,
_callStackAlignment,
_localStackAlignment));
bool hasFP = hasPreservedFP();
bool hasDA = hasDynamicAlignment();
uint32_t kSp = archTraits.spRegId();
uint32_t kFp = archTraits.fpRegId();
uint32_t kLr = archTraits.linkRegId();
// Make frame pointer dirty if the function uses it.
if (hasFP) {
_dirtyRegs[BaseReg::kGroupGp] |= Support::bitMask(kFp);
// Currently required by ARM; if this works differently across architectures
// we would most likely have to generalize this in CallConv.
if (kLr != BaseReg::kIdBad)
_dirtyRegs[BaseReg::kGroupGp] |= Support::bitMask(kLr);
}
// These two are identical if the function doesn't align its stack dynamically.
uint32_t saRegId = _saRegId;
if (saRegId == BaseReg::kIdBad)
saRegId = kSp;
// Fix stack arguments base-register from SP to FP in case it was not picked
// before and the function performs dynamic stack alignment.
if (hasDA && saRegId == kSp)
saRegId = kFp;
// Mark as dirty any register but SP if used as SA pointer.
if (saRegId != kSp)
_dirtyRegs[BaseReg::kGroupGp] |= Support::bitMask(saRegId);
_spRegId = uint8_t(kSp);
_saRegId = uint8_t(saRegId);
// Setup stack size used to save preserved registers.
uint32_t saveRestoreSizes[2] {};
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
saveRestoreSizes[size_t(!archTraits.hasPushPop(group))]
+= Support::alignUp(Support::popcnt(savedRegs(group)) * saveRestoreRegSize(group), saveRestoreAlignment(group));
_pushPopSaveSize = uint16_t(saveRestoreSizes[0]);
_extraRegSaveSize = uint16_t(saveRestoreSizes[1]);
uint32_t v = 0; // The beginning of the stack frame relative to SP after prolog.
v += callStackSize(); // Count 'callStackSize' <- This is used to call functions.
v = Support::alignUp(v, stackAlignment); // Align to function's stack alignment.
_localStackOffset = v; // Store 'localStackOffset' <- Function's local stack starts here.
v += localStackSize(); // Count 'localStackSize' <- Function's local stack ends here.
// If the function's stack must be aligned, calculate the alignment necessary
// to store vector registers, and set `FuncFrame::kAttrAlignedVecSR` to inform
// PEI that it can use instructions that perform aligned stores/loads.
if (stackAlignment >= vectorSize && _extraRegSaveSize) {
addAttributes(FuncFrame::kAttrAlignedVecSR);
v = Support::alignUp(v, vectorSize); // Align 'extraRegSaveOffset'.
}
_extraRegSaveOffset = v; // Store 'extraRegSaveOffset' <- Non-GP save/restore starts here.
v += _extraRegSaveSize; // Count 'extraRegSaveSize' <- Non-GP save/restore ends here.
// Calculate if dynamic alignment (DA) slot (stored as offset relative to SP) is required and its offset.
if (hasDA && !hasFP) {
_daOffset = v; // Store 'daOffset' <- DA pointer would be stored here.
v += registerSize; // Count 'daOffset'.
}
else {
_daOffset = FuncFrame::kTagInvalidOffset;
}
// Link Register
// -------------
//
// The stack is aligned after the function call as the return address is
// stored in a link register. Some architectures may require to always
// have aligned stack after PUSH/POP operation, which is represented by
// ArchTraits::stackAlignmentConstraint().
//
// No Link Register (X86/X64)
// --------------------------
//
// The return address should be stored after GP save/restore regs. It has
// the same size as `registerSize` (basically the native register/pointer
// size). We don't adjust it yet, as `v` already contains the exact size the
// function requires to adjust (call frame + stack frame, vec stack size).
// The stack (if we consider this size) is misaligned now, as it's always
// aligned before the function call - when `call()` is executed it pushes
// the current EIP|RIP onto the stack, and misaligns it by 12 or 8 bytes
// (depending on the architecture). So count number of bytes needed to align
// it up to the function's CallFrame (the beginning).
if (v || hasFuncCalls() || !returnAddressSize)
v += Support::alignUpDiff(v + pushPopSaveSize() + returnAddressSize, stackAlignment);
_pushPopSaveOffset = v; // Store 'pushPopSaveOffset' <- Function's push/pop save/restore starts here.
_stackAdjustment = v; // Store 'stackAdjustment' <- SA used by 'add SP, SA' and 'sub SP, SA'.
v += _pushPopSaveSize; // Count 'pushPopSaveSize' <- Function's push/pop save/restore ends here.
_finalStackSize = v; // Store 'finalStackSize' <- Final stack used by the function.
if (!archTraits.hasLinkReg())
v += registerSize; // Count 'ReturnAddress' <- As CALL pushes onto stack.
// If the function performs dynamic stack alignment then the stack-adjustment must be aligned.
if (hasDA)
_stackAdjustment = Support::alignUp(_stackAdjustment, stackAlignment);
// Calculate where the function arguments start relative to SP.
_saOffsetFromSP = hasDA ? FuncFrame::kTagInvalidOffset : v;
// Calculate where the function arguments start relative to FP or user-provided register.
_saOffsetFromSA = hasFP ? returnAddressSize + registerSize // Return address + frame pointer.
: returnAddressSize + _pushPopSaveSize; // Return address + all push/pop regs.
return kErrorOk;
}
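// Worked example (an illustrative sketch; the numbers are assumptions, not
// taken from this source): for x86_64 with naturalStackAlignment = 16, one
// saved GP register (pushPopSaveSize = 8), localStackSize = 16, no calls, no
// vector saves and no dynamic alignment, the computation above yields roughly:
//   v = 0 (call stack) -> _localStackOffset = 0 -> v = 16 (local stack)
//   alignUpDiff(16 + 8 (push/pop) + 8 (return address), 16) = 0
//   _pushPopSaveOffset = _stackAdjustment = 16, _finalStackSize = 16 + 8 = 24
// so a prolog like "push reg; sub rsp, 16" keeps the stack 16-byte aligned,
// because 24 bytes plus the 8-byte return address is a multiple of 16.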
// ============================================================================
// [asmjit::FuncArgsAssignment]
// ============================================================================
ASMJIT_FAVOR_SIZE Error FuncArgsAssignment::updateFuncFrame(FuncFrame& frame) const noexcept {
uint32_t arch = frame.arch();
const FuncDetail* func = funcDetail();
if (!func)
return DebugUtils::errored(kErrorInvalidState);
RAConstraints constraints;
ASMJIT_PROPAGATE(constraints.init(arch));
FuncArgsContext ctx;
ASMJIT_PROPAGATE(ctx.initWorkData(frame, *this, &constraints));
ASMJIT_PROPAGATE(ctx.markDstRegsDirty(frame));
ASMJIT_PROPAGATE(ctx.markScratchRegs(frame));
ASMJIT_PROPAGATE(ctx.markStackArgsReg(frame));
return kErrorOk;
}
ASMJIT_END_NAMESPACE
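
For orientation, a hedged sketch of how CallConv, FuncDetail, FuncFrame, and FuncArgsAssignment are typically driven from an x86::Assembler; the signature, register choices, helper name (emitFrameSketch), and the emitter-side calls (emitProlog / emitArgsAssignment / emitEpilog) come from the wider asmjit API rather than this file, so treat it as approximate:

#include <asmjit/x86.h>

using namespace asmjit;

// Sketch: emit a frame for `int fn(int a, int b)` and move the incoming
// arguments into eax/ecx. Error handling beyond ASMJIT_PROPAGATE is omitted.
static Error emitFrameSketch(x86::Assembler& a, const Environment& env) {
  FuncDetail func;
  ASMJIT_PROPAGATE(func.init(FuncSignatureT<int, int, int>(CallConv::kIdHost), env));

  FuncFrame frame;
  ASMJIT_PROPAGATE(frame.init(func));
  frame.addDirtyRegs(x86::eax, x86::ecx);         // registers the body clobbers

  FuncArgsAssignment args(&func);
  args.assignAll(x86::eax, x86::ecx);             // desired locations of a, b
  ASMJIT_PROPAGATE(args.updateFuncFrame(frame));  // implemented above
  ASMJIT_PROPAGATE(frame.finalize());

  ASMJIT_PROPAGATE(a.emitProlog(frame));
  ASMJIT_PROPAGATE(a.emitArgsAssignment(frame, args));
  ASMJIT_PROPAGATE(a.add(x86::eax, x86::ecx));
  ASMJIT_PROPAGATE(a.emitEpilog(frame));
  return kErrorOk;
}

Note the ordering: updateFuncFrame() must run before finalize(), as it marks the dirty registers and the stack-arguments base register that finalize() then accounts for.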

1426
deps/asmjit/src/asmjit/core/func.h vendored Normal File
View File

File diff suppressed because it is too large

315
deps/asmjit/src/asmjit/core/funcargscontext.cpp vendored Normal File
View File

@ -0,0 +1,315 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/funcargscontext_p.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
FuncArgsContext::FuncArgsContext() noexcept {
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
_workData[group].reset();
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::initWorkData(const FuncFrame& frame, const FuncArgsAssignment& args, const RAConstraints* constraints) noexcept {
// The code has to be updated if this changes.
ASMJIT_ASSERT(BaseReg::kGroupVirt == 4);
uint32_t i;
uint32_t arch = frame.arch();
const FuncDetail& func = *args.funcDetail();
_archTraits = &ArchTraits::byArch(arch);
_constraints = constraints;
_arch = uint8_t(arch);
// Initialize `_archRegs`.
for (i = 0; i < BaseReg::kGroupVirt; i++)
_workData[i]._archRegs = _constraints->availableRegs(i);
if (frame.hasPreservedFP())
_workData[BaseReg::kGroupGp]._archRegs &= ~Support::bitMask(archTraits().fpRegId());
// Extract information from all function arguments/assignments and build Var[] array.
uint32_t varId = 0;
for (uint32_t argIndex = 0; argIndex < Globals::kMaxFuncArgs; argIndex++) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
const FuncValue& dst_ = args.arg(argIndex, valueIndex);
if (!dst_.isAssigned())
continue;
const FuncValue& src_ = func.arg(argIndex, valueIndex);
if (ASMJIT_UNLIKELY(!src_.isAssigned()))
return DebugUtils::errored(kErrorInvalidState);
Var& var = _vars[varId];
var.init(src_, dst_);
FuncValue& src = var.cur;
FuncValue& dst = var.out;
uint32_t dstGroup = 0xFFFFFFFFu;
uint32_t dstId = BaseReg::kIdBad;
WorkData* dstWd = nullptr;
// Not supported.
if (src.isIndirect())
return DebugUtils::errored(kErrorInvalidAssignment);
if (dst.isReg()) {
uint32_t dstType = dst.regType();
if (ASMJIT_UNLIKELY(!archTraits().hasRegType(dstType)))
return DebugUtils::errored(kErrorInvalidRegType);
// Copy TypeId from source if the destination doesn't have it. The RA
// used by BaseCompiler would never leave TypeId undefined, but users
// of FuncAPI can just assign phys regs without specifying the type.
if (!dst.hasTypeId())
dst.setTypeId(archTraits().regTypeToTypeId(dst.regType()));
dstGroup = archTraits().regTypeToGroup(dstType);
if (ASMJIT_UNLIKELY(dstGroup >= BaseReg::kGroupVirt))
return DebugUtils::errored(kErrorInvalidRegGroup);
dstWd = &_workData[dstGroup];
dstId = dst.regId();
if (ASMJIT_UNLIKELY(dstId >= 32 || !Support::bitTest(dstWd->archRegs(), dstId)))
return DebugUtils::errored(kErrorInvalidPhysId);
if (ASMJIT_UNLIKELY(Support::bitTest(dstWd->dstRegs(), dstId)))
return DebugUtils::errored(kErrorOverlappedRegs);
dstWd->_dstRegs |= Support::bitMask(dstId);
dstWd->_dstShuf |= Support::bitMask(dstId);
dstWd->_usedRegs |= Support::bitMask(dstId);
}
else {
if (!dst.hasTypeId())
dst.setTypeId(src.typeId());
RegInfo regInfo = getSuitableRegForMemToMemMove(arch, dst.typeId(), src.typeId());
if (ASMJIT_UNLIKELY(!regInfo.isValid()))
return DebugUtils::errored(kErrorInvalidState);
_stackDstMask = uint8_t(_stackDstMask | Support::bitMask(regInfo.group()));
}
if (src.isReg()) {
uint32_t srcId = src.regId();
uint32_t srcGroup = archTraits().regTypeToGroup(src.regType());
if (dstGroup == srcGroup) {
dstWd->assign(varId, srcId);
// The best case, register is allocated where it is expected to be.
if (dstId == srcId)
var.markDone();
}
else {
if (ASMJIT_UNLIKELY(srcGroup >= BaseReg::kGroupVirt))
return DebugUtils::errored(kErrorInvalidState);
WorkData& srcData = _workData[srcGroup];
srcData.assign(varId, srcId);
}
}
else {
if (dstWd)
dstWd->_numStackArgs++;
_hasStackSrc = true;
}
varId++;
}
}
// Initialize WorkData::workRegs.
for (i = 0; i < BaseReg::kGroupVirt; i++) {
_workData[i]._workRegs = (_workData[i].archRegs() & (frame.dirtyRegs(i) | ~frame.preservedRegs(i))) | _workData[i].dstRegs() | _workData[i].assignedRegs();
}
// Create a variable that represents `SARegId` if necessary.
bool saRegRequired = _hasStackSrc && frame.hasDynamicAlignment() && !frame.hasPreservedFP();
WorkData& gpRegs = _workData[BaseReg::kGroupGp];
uint32_t saCurRegId = frame.saRegId();
uint32_t saOutRegId = args.saRegId();
if (saCurRegId != BaseReg::kIdBad) {
// Check if the provided `SARegId` doesn't collide with input registers.
if (ASMJIT_UNLIKELY(gpRegs.isAssigned(saCurRegId)))
return DebugUtils::errored(kErrorOverlappedRegs);
}
if (saOutRegId != BaseReg::kIdBad) {
// Check if the provided `SARegId` doesn't collide with argument assignments.
if (ASMJIT_UNLIKELY(Support::bitTest(gpRegs.dstRegs(), saOutRegId)))
return DebugUtils::errored(kErrorOverlappedRegs);
saRegRequired = true;
}
if (saRegRequired) {
uint32_t ptrTypeId = Environment::is32Bit(arch) ? Type::kIdU32 : Type::kIdU64;
uint32_t ptrRegType = Environment::is32Bit(arch) ? BaseReg::kTypeGp32 : BaseReg::kTypeGp64;
_saVarId = uint8_t(varId);
_hasPreservedFP = frame.hasPreservedFP();
Var& var = _vars[varId];
var.reset();
if (saCurRegId == BaseReg::kIdBad) {
if (saOutRegId != BaseReg::kIdBad && !gpRegs.isAssigned(saOutRegId)) {
saCurRegId = saOutRegId;
}
else {
uint32_t availableRegs = gpRegs.availableRegs();
if (!availableRegs)
availableRegs = gpRegs.archRegs() & ~gpRegs.workRegs();
if (ASMJIT_UNLIKELY(!availableRegs))
return DebugUtils::errored(kErrorNoMorePhysRegs);
saCurRegId = Support::ctz(availableRegs);
}
}
var.cur.initReg(ptrRegType, saCurRegId, ptrTypeId);
gpRegs.assign(varId, saCurRegId);
gpRegs._workRegs |= Support::bitMask(saCurRegId);
if (saOutRegId != BaseReg::kIdBad) {
var.out.initReg(ptrRegType, saOutRegId, ptrTypeId);
gpRegs._dstRegs |= Support::bitMask(saOutRegId);
gpRegs._workRegs |= Support::bitMask(saOutRegId);
}
else {
var.markDone();
}
varId++;
}
_varCount = varId;
// Detect register swaps.
for (varId = 0; varId < _varCount; varId++) {
Var& var = _vars[varId];
if (var.cur.isReg() && var.out.isReg()) {
uint32_t srcId = var.cur.regId();
uint32_t dstId = var.out.regId();
uint32_t group = archTraits().regTypeToGroup(var.cur.regType());
if (group != archTraits().regTypeToGroup(var.out.regType()))
continue;
WorkData& wd = _workData[group];
if (wd.isAssigned(dstId)) {
Var& other = _vars[wd._physToVarId[dstId]];
if (archTraits().regTypeToGroup(other.out.regType()) == group && other.out.regId() == srcId) {
wd._numSwaps++;
_regSwapsMask = uint8_t(_regSwapsMask | Support::bitMask(group));
}
}
}
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::markDstRegsDirty(FuncFrame& frame) noexcept {
for (uint32_t i = 0; i < BaseReg::kGroupVirt; i++) {
WorkData& wd = _workData[i];
uint32_t regs = wd.usedRegs() | wd._dstShuf;
wd._workRegs |= regs;
frame.addDirtyRegs(i, regs);
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::markScratchRegs(FuncFrame& frame) noexcept {
uint32_t groupMask = 0;
// Handle stack to stack moves.
groupMask |= _stackDstMask;
// Handle register swaps.
groupMask |= _regSwapsMask & ~Support::bitMask(BaseReg::kGroupGp);
if (!groupMask)
return kErrorOk;
// Selects one dirty register per affected group that can be used as a scratch register.
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
if (Support::bitTest(groupMask, group)) {
WorkData& wd = _workData[group];
// Initially, pick some clobbered or dirty register.
uint32_t workRegs = wd.workRegs();
uint32_t regs = workRegs & ~(wd.usedRegs() | wd._dstShuf);
// If that didn't work out pick some register which is not in 'used'.
if (!regs)
regs = workRegs & ~wd.usedRegs();
// If that didn't work out pick any other register that is allocable.
// This last resort case will, however, result in marking one more
// register dirty.
if (!regs)
regs = wd.archRegs() & ~workRegs;
// If that didn't work out we will have to use XORs instead of MOVs.
if (!regs)
continue;
uint32_t regMask = Support::blsi(regs);
wd._workRegs |= regMask;
frame.addDirtyRegs(group, regMask);
}
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::markStackArgsReg(FuncFrame& frame) noexcept {
if (_saVarId != kVarIdNone) {
const Var& var = _vars[_saVarId];
frame.setSARegId(var.cur.regId());
}
else if (frame.hasPreservedFP()) {
frame.setSARegId(archTraits().fpRegId());
}
return kErrorOk;
}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE

Some files were not shown because too many files have changed in this diff.