Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-11-22 02:33:06 +01:00
Remove ML inlining model artifacts.
They are not conducive to being stored in git. Instead, we autogenerate mock model artifacts for use in tests. Production models can be specified with the cmake flag LLVM_INLINER_MODEL_PATH, which has two sentinel values:

- download, which will download the most recent compatible model.
- autogenerate, which will autogenerate a "fake" model for testing the model uptake infrastructure.

Differential Revision: https://reviews.llvm.org/D104251
This commit is contained in:
parent b329f1bc69
commit c371e1f8a1
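For illustration (example invocations, not part of the change): with TF AOT enabled, a build can configure with -DLLVM_INLINER_MODEL_PATH=download to fetch the release pointed at by LLVM_INLINER_MODEL_CURRENT_URL below, with -DLLVM_INLINER_MODEL_PATH=autogenerate to build against a generated mock, or with an absolute path to a saved model. Leaving the flag unset now falls back to autogenerate, with a warning.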
cmake/modules/TensorFlowCompile.cmake
@@ -1,14 +1,45 @@
-# Ensure the ${model} is available at ${final_path}.
-#
-function(tfgetmodel model final_path)
-  if (IS_ABSOLUTE ${model})
-    set(${final_path} ${model} PARENT_SCOPE)
+function(tf_get_absolute_path path base final_path)
+  if (IS_ABSOLUTE ${path})
+    set(${final_path} ${path} PARENT_SCOPE)
   else()
-    set(${final_path}
-      ${CMAKE_CURRENT_SOURCE_DIR}/${model} PARENT_SCOPE)
+    set(${final_path} ${base}/${path} PARENT_SCOPE)
   endif()
 endfunction()
 
+function(tf_get_model model final_path)
+  string(FIND ${model} "http:" pos_http)
+  string(FIND ${model} "https:" pos_https)
+  if (${pos_http} EQUAL 0 OR ${pos_https} EQUAL 0)
+    message("Downloading model " ${model})
+    string(FIND ${model} "/" fname_start REVERSE)
+    math(EXPR fname_start "${fname_start}+1")
+    string(SUBSTRING ${model} ${fname_start}+1 -1 fname)
+    message("Model archive: " ${fname})
+    file(DOWNLOAD ${model} ${CMAKE_CURRENT_BINARY_DIR}/${fname})
+    file(ARCHIVE_EXTRACT INPUT
+      ${CMAKE_CURRENT_BINARY_DIR}/${fname}
+      DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/${fname}_model)
+    set(${final_path} ${CMAKE_CURRENT_BINARY_DIR}/${fname}_model/model PARENT_SCOPE)
+  else()
+    tf_get_absolute_path(${model} ${CMAKE_CURRENT_BINARY_DIR} model_path)
+    set(${final_path} ${model_path} PARENT_SCOPE)
+  endif()
+endfunction()
+
+# Generate a mock model for tests.
+function(generate_mock_model model generate_mock_model_py config)
+  tf_get_absolute_path(${model} ${CMAKE_CURRENT_BINARY_DIR} LLVM_ML_MODELS_ABSOLUTE)
+  tf_get_absolute_path(${generate_mock_model_py} ${CMAKE_CURRENT_SOURCE_DIR} GENERATE_INLINING_MODEL_ABSOLUTE)
+  tf_get_absolute_path(${config} ${CMAKE_CURRENT_SOURCE_DIR} LLVM_ML_MODEL_CONFIG_ABSOLUTE)
+  message(WARNING "Autogenerated mock models should not be used in production builds.")
+  execute_process(COMMAND python3
+    ${GENERATE_INLINING_MODEL_ABSOLUTE}
+    ${LLVM_ML_MODEL_CONFIG_ABSOLUTE}
+    ${LLVM_ML_MODELS_ABSOLUTE}-autogenerated
+    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+  )
+endfunction()
+
 # Run the tensorflow compiler (saved_model_cli) on the saved model in the
 # ${model} directory, looking for the ${tag_set} tag set, and the SignatureDef
 # ${signature_def_key}.
@@ -29,7 +60,7 @@ function(tfcompile model tag_set signature_def_key fname cpp_class)
       ${hdr_file} " and object " ${obj_file}
       " files for model " ${model})
   else()
-    tfgetmodel(${model} LLVM_ML_MODELS_ABSOLUTE)
+    tf_get_absolute_path(${model} ${CMAKE_CURRENT_BINARY_DIR} LLVM_ML_MODELS_ABSOLUTE)
     message("Using model at " ${LLVM_ML_MODELS_ABSOLUTE})
     add_custom_command(OUTPUT ${obj_file} ${hdr_file}
       COMMAND ${TENSORFLOW_AOT_COMPILER} aot_compile_cpu
@@ -52,5 +83,32 @@ function(tfcompile model tag_set signature_def_key fname cpp_class)
   set(GENERATED_HEADERS ${GENERATED_HEADERS} ${hdr_file} PARENT_SCOPE)
   set_source_files_properties(${hdr_file} PROPERTIES
     GENERATED 1)
 
 endfunction()
+
+function(tf_find_and_compile model default_url default_path generation_config tag_set signature_def_key fname cpp_class)
+  if ("${model}" STREQUAL "download")
+    set(model ${default_url})
+  endif()
+
+  if ("${model}" STREQUAL "autogenerate")
+    generate_mock_model(${default_path} models/generate_mock_model.py ${generation_config})
+    set(model ${default_path}-autogenerated)
+  endif()
+
+  tf_get_model(${model} LLVM_ML_MODELS_ABSOLUTE)
+  tfcompile(${LLVM_ML_MODELS_ABSOLUTE} ${tag_set} ${signature_def_key} ${fname} ${cpp_class})
+
+  set(GENERATED_OBJS ${GENERATED_OBJS} ${obj_file} PARENT_SCOPE)
+  set_source_files_properties(${obj_file} PROPERTIES
+    GENERATED 1 EXTERNAL_OBJECT 1)
+
+  set(GENERATED_HEADERS ${GENERATED_HEADERS} ${hdr_file} PARENT_SCOPE)
+  set_source_files_properties(${hdr_file} PROPERTIES
+    GENERATED 1)
+
+  set(GeneratedMLSources ${GeneratedMLSources} ${GENERATED_HEADERS} PARENT_SCOPE)
+  set(MLDeps ${MLDeps} tf_xla_runtime PARENT_SCOPE)
+  set(MLLinkDeps ${MLLinkDeps} tf_xla_runtime ${GENERATED_OBJS} PARENT_SCOPE)
+
+endfunction()
lib/Analysis/CMakeLists.txt
@@ -1,19 +1,33 @@
+if (DEFINED LLVM_HAVE_TF_AOT OR DEFINED LLVM_HAVE_TF_API)
+  include(TensorFlowCompile)
+  set(LLVM_INLINER_MODEL_PATH_DEFAULT "models/inliner-Oz")
+
+  # This url points to the most recent model which is known to be compatible with
+  # LLVM. When better models are published, this url should be updated to aid
+  # discoverability.
+  set(LLVM_INLINER_MODEL_CURRENT_URL "https://github.com/google/ml-compiler-opt/releases/download/inlining-Oz-v0.1/inlining-Oz-acabaf6-v0.1.tar.gz")
+
 if (DEFINED LLVM_HAVE_TF_AOT)
-  set(LLVM_INLINER_MODEL_PATH "models/inliner"
-    CACHE STRING
-    "ML-driven inliner policy location (path to saved model)")
-  include(TensorFlowCompile)
-  tfcompile(${LLVM_INLINER_MODEL_PATH} serve action InlinerSizeModel llvm::InlinerSizeModel)
-  list(APPEND GeneratedMLSources
-    ${GENERATED_HEADERS}
+  # If the path is empty, autogenerate the model
+  if (NOT DEFINED LLVM_INLINER_MODEL_PATH OR "${LLVM_INLINER_MODEL_PATH}" STREQUAL "")
+    set(LLVM_INLINER_MODEL_PATH "autogenerate")
+    message(WARNING "LLVM_INLINER_MODEL_PATH was not set: autogenerating a model to finish the build.")
+  endif()
+
+  tf_find_and_compile(
+    ${LLVM_INLINER_MODEL_PATH}
+    ${LLVM_INLINER_MODEL_CURRENT_URL}
+    ${LLVM_INLINER_MODEL_PATH_DEFAULT}
+    "models/inlining/config.py"
+    serve
+    action
+    InlinerSizeModel
+    llvm::InlinerSizeModel
   )
-  LIST(APPEND MLDeps tf_xla_runtime)
-  LIST(APPEND MLLinkDeps tf_xla_runtime ${GENERATED_OBJS})
 endif()
 
 if (DEFINED LLVM_HAVE_TF_API)
-  LIST(APPEND MLLinkDeps ${tensorflow_c_api})
+  list(APPEND MLLinkDeps ${tensorflow_c_api})
 endif()
+endif()
 
lib/Analysis/models/generate_mock_model.py (new file, 69 lines)
@@ -0,0 +1,69 @@
+"""Generate a mock model for LLVM tests.
+
+The generated model is not a neural net - it is just a tf.function with the
+correct input and output parameters. By construction, the mock model will always
+output 1.
+"""
+
+import os
+import importlib.util
+import sys
+
+import tensorflow as tf
+
+
+def get_output_spec_path(path):
+  return os.path.join(path, 'output_spec.json')
+
+
+def build_mock_model(path, signature):
+  """Build and save the mock model with the given signature"""
+  module = tf.Module()
+
+  # We have to set this useless variable in order for the TF C API to correctly
+  # intake it
+  module.var = tf.Variable(0.)
+
+  def action(*inputs):
+    s = tf.reduce_sum([tf.cast(x, tf.float32) for x in tf.nest.flatten(inputs)])
+    return {signature['output']: float('inf') + s + module.var}
+
+  module.action = tf.function()(action)
+  action = {'action': module.action.get_concrete_function(signature['inputs'])}
+  tf.saved_model.save(module, path, signatures=action)
+
+  output_spec_path = get_output_spec_path(path)
+  with open(output_spec_path, 'w') as f:
+    print(f'Writing output spec to {output_spec_path}.')
+    f.write(signature['output_spec'])
+
+
+def get_external_signature(config_path):
+  """Get the signature for the desired model.
+
+  We manually import the python file at config_path to avoid adding a gin
+  dependency to the LLVM build.
+  """
+  spec = importlib.util.spec_from_file_location('config', config_path)
+  config = importlib.util.module_from_spec(spec)
+  spec.loader.exec_module(config)
+
+  return {
+      'inputs': config.get_input_signature(),
+      'output': config.get_output_signature(),
+      'output_spec': config.get_output_spec()
+  }
+
+
+def main(argv):
+  assert len(argv) == 3
+  config_path = argv[1]
+  model_path = argv[2]
+
+  print(f'Using config file at [{argv[1]}]')
+  signature = get_external_signature(config_path)
+  build_mock_model(model_path, signature)
+
+
+if __name__ == '__main__':
+  main(sys.argv)
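The script is exercised by the generate_mock_model cmake function above and by the test RUN lines further down; as a standalone sanity check it can also be driven by hand. A minimal sketch, assuming TensorFlow is installed and the repository root is the working directory (both assumptions):

  import subprocess
  import tempfile

  # Destination directory for the SavedModel plus the output_spec.json beside it.
  out_dir = tempfile.mkdtemp()
  subprocess.check_call([
      'python3', 'lib/Analysis/models/generate_mock_model.py',
      'lib/Analysis/models/inlining/config.py', out_dir])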
@@ -1,3 +0,0 @@
-Reference model for inliner -Oz decision policy.
-Note that, currently, this model is also referenced by test/Transforms/Inline/ML
-tests - if replacing it, check those tests, too.
@@ -1,14 +0,0 @@
-[
-  {
-    "logging_name": "inlining_decision",
-    "tensor_spec": {
-      "name": "StatefulPartitionedCall",
-      "port": 0,
-      "type": "int64_t",
-      "shape": [
-        1
-      ]
-    }
-  }
-]
-
File diff suppressed because one or more lines are too long
Binary file not shown.
Binary file not shown.
lib/Analysis/models/inlining/config.py (new file, 56 lines)
@@ -0,0 +1,56 @@
+"""Inlining Training config."""
+
+import tensorflow as tf
+
+POLICY_DECISION_LABEL = 'inlining_decision'
+POLICY_OUTPUT_SPEC = """
+[
+    {
+        "logging_name": "inlining_decision",
+        "tensor_spec": {
+            "name": "StatefulPartitionedCall",
+            "port": 0,
+            "type": "int64_t",
+            "shape": [
+                1
+            ]
+        }
+    }
+]
+"""
+
+
+# pylint: disable=g-complex-comprehension
+def get_input_signature():
+  """Returns the list of features for LLVM inlining."""
+  # int64 features
+  inputs = [
+      tf.TensorSpec(dtype=tf.int64, shape=(), name=key) for key in [
+          'caller_basic_block_count', 'caller_conditionally_executed_blocks',
+          'caller_users', 'callee_basic_block_count',
+          'callee_conditionally_executed_blocks', 'callee_users',
+          'nr_ctant_params', 'node_count', 'edge_count', 'callsite_height',
+          'cost_estimate', 'inlining_default'
+      ]
+  ]
+
+  # float32 features
+  inputs.extend([
+      tf.TensorSpec(dtype=tf.float32, shape=(), name=key)
+      for key in ['discount', 'reward']
+  ])
+
+  # int32 features
+  inputs.extend([
+      tf.TensorSpec(dtype=tf.int32, shape=(), name=key)
+      for key in ['step_type']
+  ])
+  return inputs
+
+
+def get_output_signature():
+  return POLICY_DECISION_LABEL
+
+
+def get_output_spec():
+  return POLICY_OUTPUT_SPEC
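To see the feature list this config produces, one can import it the same way get_external_signature in generate_mock_model.py does. A sketch, again assuming TensorFlow is installed and the repository root as working directory:

  import importlib.util

  spec = importlib.util.spec_from_file_location(
      'config', 'lib/Analysis/models/inlining/config.py')
  config = importlib.util.module_from_spec(spec)
  spec.loader.exec_module(config)
  # Prints lines like: caller_basic_block_count int64 ()
  for ts in config.get_input_signature():
    print(ts.name, ts.dtype.name, ts.shape)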
@@ -6,19 +6,23 @@
 ; factor, we penalize the 'bad' decision.
 ; REQUIRES: have_tf_api
 ;
+; Generate mock model
+; RUN: rm -rf %t && mkdir %t
+; RUN: python3 %S/../../../../lib/Analysis/models/generate_mock_model.py %S/../../../../lib/Analysis/models/inlining/config.py %t
+;
 ; When the bounds are very wide ("no bounds"), all inlinings happen.
-; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%S/../../../../lib/Analysis/models/inliner -training-log=- -enable-ml-inliner=development -ml-advisor-size-increase-threshold=10.0 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=NOBOUNDS
+; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%t -training-log=- -enable-ml-inliner=development -ml-advisor-size-increase-threshold=10.0 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=NOBOUNDS
 ;
 ; When the bounds are very restrictive, the first inlining happens but it's
 ; considered as "bad" (since it trips over the bounds) and its reward is a
 ; penalty. However, the mandatory inlining, which is considered next, happens.
 ; No other inlinings happen.
-; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%S/../../../../lib/Analysis/models/inliner -training-log=- -enable-ml-inliner=development -ml-advisor-size-increase-threshold=1.0 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=BOUNDS
+; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%t -training-log=- -enable-ml-inliner=development -ml-advisor-size-increase-threshold=1.0 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=BOUNDS
 ;
 ; With more restrictive bounds, the first inlining happens and is OK. The
 ; mandatory inlining happens next, and it trips over the bounds, which then
 ; forces no further inlinings.
-; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%S/../../../../lib/Analysis/models/inliner -training-log=- -enable-ml-inliner=development -ml-advisor-size-increase-threshold=1.1 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=RELAXED-BOUNDS
+; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%t -training-log=- -enable-ml-inliner=development -ml-advisor-size-increase-threshold=1.1 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=RELAXED-BOUNDS
 
 target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-grtev4-linux-gnu"
@@ -61,4 +65,4 @@ attributes #0 = { alwaysinline }
 ; it won't be inlined.
 ; NOBOUNDS-NOT: @may_not_be_inlined
 ; RELAXED-BOUNDS: call i64 @may_not_be_inlined
-; BOUNDS: call i64 @may_not_be_inlined
+; BOUNDS: call i64 @may_not_be_inlined
@@ -1,9 +1,13 @@
 ; Test that we can produce a log if we have or do not have a model, in development mode.
 ; REQUIRES: have_tf_api
-; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-model-under-training=%S/../../../../lib/Analysis/models/inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s | FileCheck %s
-; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-model-under-training=%S/../../../../lib/Analysis/models/inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-output-spec-override=%S/Inputs/test_output_spec.json -S < %s | FileCheck %s --check-prefixes=EXTRA-OUTPUTS,CHECK
+; Generate mock model
+; RUN: rm -rf %t && mkdir %t
+; RUN: python3 %S/../../../../lib/Analysis/models/generate_mock_model.py %S/../../../../lib/Analysis/models/inlining/config.py %t
+;
+; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-model-under-training=%t -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s | FileCheck %s
+; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-model-under-training=%t -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-output-spec-override=%S/Inputs/test_output_spec.json -S < %s | FileCheck %s --check-prefixes=EXTRA-OUTPUTS,CHECK
 ; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s | FileCheck %s
-; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-model-under-training=%S/../../../../lib/Analysis/models/inliner -S < %s | FileCheck %s --check-prefix=NOREWARD
+; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-model-under-training=%t -S < %s | FileCheck %s --check-prefix=NOREWARD
 ; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -S < %s | FileCheck %s --check-prefix=NOREWARD
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -44,7 +48,7 @@ define dso_local i32 @top() {
 ; Check we produce a protobuf that has inlining decisions and rewards.
 ; CHECK-NOT: fake_extra_output
 ; EXTRA-OUTPUTS: key: "fake_extra_output" value: {
-; EXTRA-OUTPUTS-NEXT: feature: { int64_list: { value: [1] } }
+; EXTRA-OUTPUTS-NEXT: feature: { int64_list: { value: [{{[0-9]+}}] } }
 ; CHECK: key: "inlining_decision" value: {
 ; CHECK-NEXT: feature: { int64_list: { value: [1] } }
 ; CHECK: key: "delta_size" value: {
@@ -6,5 +6,7 @@
 ; for the 'release' mode.
 ;
 ; REQUIRES: have_tf_api
+; RUN: rm -rf %t && mkdir %t
+; RUN: python3 %S/../../../../lib/Analysis/models/generate_mock_model.py %S/../../../../lib/Analysis/models/inlining/config.py %t
 ; RUN: opt -passes=scc-oz-module-inliner -enable-ml-inliner=default -S < %S/Inputs/test-module.ll 2>&1 | FileCheck %S/Inputs/test-module.ll --check-prefix=DEFAULT
-; RUN: opt -passes=scc-oz-module-inliner -enable-ml-inliner=development -ml-inliner-model-under-training=%S/../../../../lib/Analysis/models/inliner -S < %S/Inputs/test-module.ll 2>&1 | FileCheck %S/Inputs/test-module.ll --check-prefix=CHECK
+; RUN: opt -passes=scc-oz-module-inliner -enable-ml-inliner=development -ml-inliner-model-under-training=%t -S < %S/Inputs/test-module.ll 2>&1 | FileCheck %S/Inputs/test-module.ll --check-prefix=CHECK