[mlgo] Skip AOT-compiling a model if a header/object pair is provided
This allows one to cross-compile the header/object for a model in a setup where the compiler is built on a system that cannot host the AOT compiler. For example, an arm-hostable clang may be desired: the AOT TensorFlow compiler can cross-compile to arm, but it cannot currently run on arm. The only alternative in that scenario would be to cross-compile clang itself, but that gets complicated when it comes to running tests afterwards.

Differential Revision: https://reviews.llvm.org/D99992
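As a rough usage sketch (not part of the patch itself): for a tfcompile() call whose fname is, say, a hypothetical MyModel, the pre-built pair can be supplied through the LLVM_OVERRIDE_MODEL_HEADER_<FNAME> / LLVM_OVERRIDE_MODEL_OBJECT_<FNAME> variables this change reads (fname upper-cased), for example from an initial-cache file; the model name and paths below are purely illustrative:

    # Hypothetical initial-cache file (passed with `cmake -C`). The .h/.o pair
    # was AOT-compiled ahead of time on a machine that can run the AOT compiler.
    set(LLVM_OVERRIDE_MODEL_HEADER_MYMODEL
        "/prebuilt/MyModel.h" CACHE FILEPATH "Pre-built model header")
    set(LLVM_OVERRIDE_MODEL_OBJECT_MYMODEL
        "/prebuilt/MyModel.o" CACHE FILEPATH "Pre-built model object")

The pair itself can be produced on any machine that can host the AOT compiler by running the same saved_model_cli aot_compile_cpu invocation the build would otherwise issue, with --target_triple pointing at the desired target.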
parent 157f4d3031
commit e3f85d297f
@@ -9,27 +9,38 @@ function(tfgetmodel model final_path)
   endif()
 endfunction()
 
 # Run the tensorflow compiler (saved_model_cli) on the saved model in the
 # ${model} directory, looking for the ${tag_set} tag set, and the SignatureDef
 # ${signature_def_key}.
 # Produce a pair of files called ${fname}.h and ${fname}.o in the
 # ${CMAKE_CURRENT_BINARY_DIR}. The generated header will define a C++ class
 # called ${cpp_class} - which may be a namespace-qualified class name.
 function(tfcompile model tag_set signature_def_key fname cpp_class)
-  tfgetmodel(${model} LLVM_ML_MODELS_ABSOLUTE)
-  message("Using model at " ${LLVM_ML_MODELS_ABSOLUTE})
   set(prefix ${CMAKE_CURRENT_BINARY_DIR}/${fname})
   set(obj_file ${prefix}.o)
   set(hdr_file ${prefix}.h)
-  add_custom_command(OUTPUT ${obj_file} ${hdr_file}
-    COMMAND "XLA_FLAGS=\"--xla_cpu_multi_thread_eigen=false\"" ${TENSORFLOW_AOT_COMPILER} aot_compile_cpu
-          --dir ${LLVM_ML_MODELS_ABSOLUTE}
-          --tag_set ${tag_set}
-          --signature_def_key ${signature_def_key}
-          --output_prefix ${prefix}
-          --cpp_class ${cpp_class}
-          --target_triple ${LLVM_HOST_TRIPLE}
-  )
+  string(TOUPPER ${fname} fname_allcaps)
+  set(override_header ${LLVM_OVERRIDE_MODEL_HEADER_${fname_allcaps}})
+  set(override_object ${LLVM_OVERRIDE_MODEL_OBJECT_${fname_allcaps}})
+  if (EXISTS "${override_header}" AND EXISTS "${override_object}")
+    configure_file(${override_header} ${hdr_file} COPYONLY)
+    configure_file(${override_object} ${obj_file} COPYONLY)
+    message("Using provided header "
+      ${hdr_file} " and object " ${obj_file}
+      " files for model " ${model})
+  else()
+    tfgetmodel(${model} LLVM_ML_MODELS_ABSOLUTE)
+    message("Using model at " ${LLVM_ML_MODELS_ABSOLUTE})
+    add_custom_command(OUTPUT ${obj_file} ${hdr_file}
+      COMMAND "XLA_FLAGS=\"--xla_cpu_multi_thread_eigen=false\"" ${TENSORFLOW_AOT_COMPILER} aot_compile_cpu
+            --dir ${LLVM_ML_MODELS_ABSOLUTE}
+            --tag_set ${tag_set}
+            --signature_def_key ${signature_def_key}
+            --output_prefix ${prefix}
+            --cpp_class ${cpp_class}
+            --target_triple ${LLVM_HOST_TRIPLE}
+    )
+  endif()
 
   # Aggregate the objects so that results of different tfcompile calls may be
   # grouped into one target.