
[mlgo] Skip AOT-compiling a model if a header/object pair is provided

This allows one to cross-compile the header/object pair for a model in a
setup where the compiler is built on a system that cannot host the AOT
compiler. For example, an arm-hostable clang may be desired: the TensorFlow
AOT compiler can cross-compile to arm, but it can't currently run on arm
itself.

The only alternative in that scenario would be to cross-compile clang
itself, but that gets complicated when trying to run tests afterwards.

Differential Revision: https://reviews.llvm.org/D99992
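
For illustration only (not part of the change itself): the override is driven by two
per-model CMake variables whose suffix is the upper-cased fname passed to tfcompile().
Assuming a hypothetical model registered with fname MyModel and a pre-generated
header/object pair under /prebuilt, a cache-initializer file passed with cmake -C
could look like this minimal sketch:

  # Hypothetical overrides.cmake, passed as: cmake -C overrides.cmake ...
  # tfcompile() upper-cases the fname, so fname "MyModel" reads the *_MYMODEL variables.
  set(LLVM_OVERRIDE_MODEL_HEADER_MYMODEL
      "/prebuilt/MyModel.h" CACHE FILEPATH "Pre-generated AOT header for MyModel")
  set(LLVM_OVERRIDE_MODEL_OBJECT_MYMODEL
      "/prebuilt/MyModel.o" CACHE FILEPATH "Pre-generated AOT object for MyModel")

The same variables can equally be set on the command line with -D; when both files
exist, tfcompile() (see the diff below) copies them into place instead of running the
AOT compiler.
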
Author: Mircea Trofin, 2021-04-05 16:15:36 -07:00
Parent: 157f4d3031
Commit: e3f85d297f

@@ -16,20 +16,31 @@ endfunction()
 # ${CMAKE_CURRENT_BINARY_DIR}. The generated header will define a C++ class
 # called ${cpp_class} - which may be a namespace-qualified class name.
 function(tfcompile model tag_set signature_def_key fname cpp_class)
-  tfgetmodel(${model} LLVM_ML_MODELS_ABSOLUTE)
-  message("Using model at " ${LLVM_ML_MODELS_ABSOLUTE})
   set(prefix ${CMAKE_CURRENT_BINARY_DIR}/${fname})
   set(obj_file ${prefix}.o)
   set(hdr_file ${prefix}.h)
-  add_custom_command(OUTPUT ${obj_file} ${hdr_file}
-    COMMAND "XLA_FLAGS=\"--xla_cpu_multi_thread_eigen=false\"" ${TENSORFLOW_AOT_COMPILER} aot_compile_cpu
-      --dir ${LLVM_ML_MODELS_ABSOLUTE}
-      --tag_set ${tag_set}
-      --signature_def_key ${signature_def_key}
-      --output_prefix ${prefix}
-      --cpp_class ${cpp_class}
-      --target_triple ${LLVM_HOST_TRIPLE}
-  )
+  string(TOUPPER ${fname} fname_allcaps)
+  set(override_header ${LLVM_OVERRIDE_MODEL_HEADER_${fname_allcaps}})
+  set(override_object ${LLVM_OVERRIDE_MODEL_OBJECT_${fname_allcaps}})
+  if (EXISTS "${override_header}" AND EXISTS "${override_object}")
+    configure_file(${override_header} ${hdr_file} COPYONLY)
+    configure_file(${override_object} ${obj_file} COPYONLY)
+    message("Using provided header "
+      ${hdr_file} " and object " ${obj_file}
+      " files for model " ${model})
+  else()
+    tfgetmodel(${model} LLVM_ML_MODELS_ABSOLUTE)
+    message("Using model at " ${LLVM_ML_MODELS_ABSOLUTE})
+    add_custom_command(OUTPUT ${obj_file} ${hdr_file}
+      COMMAND "XLA_FLAGS=\"--xla_cpu_multi_thread_eigen=false\"" ${TENSORFLOW_AOT_COMPILER} aot_compile_cpu
+        --dir ${LLVM_ML_MODELS_ABSOLUTE}
+        --tag_set ${tag_set}
+        --signature_def_key ${signature_def_key}
+        --output_prefix ${prefix}
+        --cpp_class ${cpp_class}
+        --target_triple ${LLVM_HOST_TRIPLE}
+    )
+  endif()
   # Aggregate the objects so that results of different tfcompile calls may be
   # grouped into one target.
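
For context, a consuming CMakeLists.txt is expected to call the function above with
the arguments documented in its signature; every value in this sketch is an
illustrative placeholder rather than something taken from this commit:

  # Hypothetical call site: produces MyModel.h/MyModel.o in the current binary dir
  # (or copies the override pair) and declares the C++ class mylib::MyModel in the
  # generated header.
  tfcompile(${MY_MODEL_DIR} serve my_signature MyModel mylib::MyModel)

Either way, the resulting object file is then aggregated with those of other
tfcompile() calls into a single target, as the trailing comment notes.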